2024-11-23 17:23:27,619 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-11-23 17:23:27,637 main DEBUG Took 0.015419 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-23 17:23:27,638 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-23 17:23:27,638 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-23 17:23:27,640 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-23 17:23:27,642 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,677 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-23 17:23:27,713 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,718 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,719 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,719 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,720 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,721 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,722 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,723 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,723 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,724 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,725 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,726 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,726 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,727 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-23 17:23:27,727 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,729 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,730 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,731 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,732 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,732 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,733 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,733 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,734 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,735 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-23 17:23:27,735 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,736 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-23 17:23:27,738 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-23 17:23:27,740 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-23 17:23:27,743 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-23 17:23:27,753 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-23 17:23:27,756 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-23 17:23:27,756 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-23 17:23:27,784 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-23 17:23:27,791 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-23 17:23:27,794 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-23 17:23:27,795 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-23 17:23:27,796 main DEBUG createAppenders(={Console}) 2024-11-23 17:23:27,797 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 initialized 2024-11-23 17:23:27,798 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-11-23 17:23:27,798 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 OK. 2024-11-23 17:23:27,799 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-23 17:23:27,800 main DEBUG OutputStream closed 2024-11-23 17:23:27,800 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-23 17:23:27,801 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-23 17:23:27,801 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5a56cdac OK 2024-11-23 17:23:28,014 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-23 17:23:28,017 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-23 17:23:28,029 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-23 17:23:28,030 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-23 17:23:28,031 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-23 17:23:28,031 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-23 17:23:28,032 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-23 17:23:28,032 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-23 17:23:28,033 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-23 17:23:28,033 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-23 17:23:28,034 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-23 17:23:28,034 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-23 17:23:28,054 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-23 17:23:28,055 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-23 17:23:28,055 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-23 17:23:28,056 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-23 17:23:28,056 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-23 17:23:28,057 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-23 17:23:28,069 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23 17:23:28,069 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6f63b475) with optional ClassLoader: null 2024-11-23 17:23:28,070 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-23 17:23:28,071 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6f63b475] started OK. 2024-11-23T17:23:28,097 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-11-23 17:23:28,102 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-23 17:23:28,102 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-23T17:23:28,630 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe 2024-11-23T17:23:28,631 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-11-23T17:23:28,736 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-11-23T17:23:29,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-23T17:23:29,216 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b, deleteOnExit=true 2024-11-23T17:23:29,217 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-23T17:23:29,218 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/test.cache.data in system properties and HBase conf 2024-11-23T17:23:29,220 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T17:23:29,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir in system properties and HBase conf 2024-11-23T17:23:29,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T17:23:29,224 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T17:23:29,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T17:23:29,560 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-23T17:23:29,568 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T17:23:29,573 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T17:23:29,574 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T17:23:29,575 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T17:23:29,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T17:23:29,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T17:23:29,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T17:23:29,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T17:23:29,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T17:23:29,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/nfs.dump.dir in system properties and HBase conf 2024-11-23T17:23:29,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir in system properties and HBase conf 2024-11-23T17:23:29,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T17:23:29,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T17:23:29,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T17:23:32,103 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-23T17:23:32,327 INFO [Time-limited test {}] log.Log(170): Logging initialized @6022ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-23T17:23:32,541 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:32,687 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:23:32,810 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:23:32,810 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:23:32,814 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T17:23:32,895 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:32,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@654c02d1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:23:32,921 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cd6ab6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T17:23:33,413 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@12351f7e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir/jetty-localhost-44391-hadoop-hdfs-3_4_1-tests_jar-_-any-2333617198546332957/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-23T17:23:33,493 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:44391} 2024-11-23T17:23:33,494 INFO [Time-limited test {}] server.Server(415): Started @7190ms 2024-11-23T17:23:34,535 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:34,561 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:23:34,609 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:23:34,610 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:23:34,611 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T17:23:34,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3622d218{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:23:34,622 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@413b124e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T17:23:34,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ead95b8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir/jetty-localhost-44969-hadoop-hdfs-3_4_1-tests_jar-_-any-10300088967866585153/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T17:23:34,809 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@6106ba1a{HTTP/1.1, (http/1.1)}{localhost:44969} 2024-11-23T17:23:34,810 INFO [Time-limited test {}] server.Server(415): Started @8506ms 2024-11-23T17:23:34,984 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T17:23:35,354 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:35,367 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:23:35,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:23:35,433 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:23:35,433 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T17:23:35,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@339990ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:23:35,442 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6583bccc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T17:23:35,621 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@531e55ae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir/jetty-localhost-44921-hadoop-hdfs-3_4_1-tests_jar-_-any-6667090084425759149/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T17:23:35,622 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6d8e07b1{HTTP/1.1, (http/1.1)}{localhost:44921} 2024-11-23T17:23:35,622 INFO [Time-limited test {}] server.Server(415): Started @9319ms 2024-11-23T17:23:35,628 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-23T17:23:35,804 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:35,811 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:23:35,821 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:23:35,821 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:23:35,822 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T17:23:35,825 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20aa1485{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:23:35,826 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15a0cd2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-23T17:23:35,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2da75756{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir/jetty-localhost-44837-hadoop-hdfs-3_4_1-tests_jar-_-any-11391532009339473107/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-23T17:23:35,989 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@ce7b4c8{HTTP/1.1, (http/1.1)}{localhost:44837} 2024-11-23T17:23:35,989 INFO [Time-limited test {}] server.Server(415): Started @9686ms 2024-11-23T17:23:35,994 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-23T17:23:37,675 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3/current/BP-931987288-172.17.0.2-1732382611165/current, will proceed with Du for space computation calculation, 2024-11-23T17:23:37,681 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/current/BP-931987288-172.17.0.2-1732382611165/current, will proceed with Du for space computation calculation, 2024-11-23T17:23:37,685 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1/current/BP-931987288-172.17.0.2-1732382611165/current, will proceed with Du for space computation calculation, 2024-11-23T17:23:37,693 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/current/BP-931987288-172.17.0.2-1732382611165/current, will proceed with Du for space computation calculation, 2024-11-23T17:23:37,781 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T17:23:37,785 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-23T17:23:37,872 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xedf3cb0803e141a5 with lease ID 0x6c3ddfe6c6b9ff32: Processing first storage report for DS-b2901eba-e22f-4426-936a-68cae471a098 from datanode DatanodeRegistration(127.0.0.1:45743, datanodeUuid=2d4e195b-c31d-4b79-91a9-04141678a6f9, infoPort=41345, infoSecurePort=0, ipcPort=44097, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165) 2024-11-23T17:23:37,874 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xedf3cb0803e141a5 with lease ID 0x6c3ddfe6c6b9ff32: from storage DS-b2901eba-e22f-4426-936a-68cae471a098 node DatanodeRegistration(127.0.0.1:45743, datanodeUuid=2d4e195b-c31d-4b79-91a9-04141678a6f9, infoPort=41345, infoSecurePort=0, ipcPort=44097, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T17:23:37,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29f9504015b4a24e with lease ID 0x6c3ddfe6c6b9ff33: Processing first storage report for DS-fb8b94d4-e3db-4dcc-bda0-734fb35ddb07 from datanode DatanodeRegistration(127.0.0.1:44591, datanodeUuid=751a80af-2f15-4df2-a58c-7825bba66825, infoPort=44373, infoSecurePort=0, ipcPort=34763, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165) 2024-11-23T17:23:37,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x29f9504015b4a24e with lease ID 0x6c3ddfe6c6b9ff33: from storage DS-fb8b94d4-e3db-4dcc-bda0-734fb35ddb07 node DatanodeRegistration(127.0.0.1:44591, datanodeUuid=751a80af-2f15-4df2-a58c-7825bba66825, infoPort=44373, infoSecurePort=0, ipcPort=34763, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T17:23:37,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xedf3cb0803e141a5 with lease ID 0x6c3ddfe6c6b9ff32: Processing first storage report for DS-7c9dc6a6-ea4c-4ec0-a394-cc6d2ba0e714 from datanode DatanodeRegistration(127.0.0.1:45743, datanodeUuid=2d4e195b-c31d-4b79-91a9-04141678a6f9, infoPort=41345, infoSecurePort=0, ipcPort=44097, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165) 2024-11-23T17:23:37,876 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xedf3cb0803e141a5 with lease ID 0x6c3ddfe6c6b9ff32: from storage DS-7c9dc6a6-ea4c-4ec0-a394-cc6d2ba0e714 node DatanodeRegistration(127.0.0.1:45743, datanodeUuid=2d4e195b-c31d-4b79-91a9-04141678a6f9, infoPort=41345, infoSecurePort=0, ipcPort=44097, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T17:23:37,876 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x29f9504015b4a24e with lease ID 0x6c3ddfe6c6b9ff33: Processing first storage report for DS-d3729aac-e64e-4550-a169-6f4268c9ca8d from datanode DatanodeRegistration(127.0.0.1:44591, datanodeUuid=751a80af-2f15-4df2-a58c-7825bba66825, infoPort=44373, infoSecurePort=0, ipcPort=34763, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165) 2024-11-23T17:23:37,876 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x29f9504015b4a24e with lease ID 0x6c3ddfe6c6b9ff33: from storage DS-d3729aac-e64e-4550-a169-6f4268c9ca8d node DatanodeRegistration(127.0.0.1:44591, datanodeUuid=751a80af-2f15-4df2-a58c-7825bba66825, infoPort=44373, infoSecurePort=0, ipcPort=34763, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-23T17:23:37,941 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5/current/BP-931987288-172.17.0.2-1732382611165/current, will proceed with Du for space computation calculation, 2024-11-23T17:23:37,953 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/current/BP-931987288-172.17.0.2-1732382611165/current, will proceed with Du for space computation calculation, 2024-11-23T17:23:38,009 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-23T17:23:38,015 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9758795f6e06f0b with lease ID 0x6c3ddfe6c6b9ff34: Processing first storage report for DS-7a832cf7-34ef-46bd-be01-46509a8fec53 from datanode DatanodeRegistration(127.0.0.1:46761, datanodeUuid=a3f5f17c-2fc0-43ec-b0ce-1f425654c4a0, infoPort=33645, infoSecurePort=0, ipcPort=33341, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165) 2024-11-23T17:23:38,016 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9758795f6e06f0b with lease ID 0x6c3ddfe6c6b9ff34: from storage DS-7a832cf7-34ef-46bd-be01-46509a8fec53 node DatanodeRegistration(127.0.0.1:46761, datanodeUuid=a3f5f17c-2fc0-43ec-b0ce-1f425654c4a0, infoPort=33645, infoSecurePort=0, ipcPort=33341, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T17:23:38,016 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9758795f6e06f0b with lease ID 0x6c3ddfe6c6b9ff34: Processing first storage report for DS-ebf12f0b-33f6-4300-a17d-db06abe67869 from datanode DatanodeRegistration(127.0.0.1:46761, datanodeUuid=a3f5f17c-2fc0-43ec-b0ce-1f425654c4a0, infoPort=33645, infoSecurePort=0, ipcPort=33341, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165) 2024-11-23T17:23:38,016 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9758795f6e06f0b with lease ID 0x6c3ddfe6c6b9ff34: from storage DS-ebf12f0b-33f6-4300-a17d-db06abe67869 node DatanodeRegistration(127.0.0.1:46761, datanodeUuid=a3f5f17c-2fc0-43ec-b0ce-1f425654c4a0, infoPort=33645, infoSecurePort=0, ipcPort=33341, storageInfo=lv=-57;cid=testClusterID;nsid=1187046870;c=1732382611165), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-23T17:23:38,078 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe 2024-11-23T17:23:38,281 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/zookeeper_0, clientPort=57294, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-23T17:23:38,300 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57294 2024-11-23T17:23:38,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:38,332 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:38,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741825_1001 (size=7) 2024-11-23T17:23:38,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741825_1001 (size=7) 2024-11-23T17:23:38,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741825_1001 (size=7) 2024-11-23T17:23:38,841 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf with version=8 2024-11-23T17:23:38,842 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/hbase-staging 2024-11-23T17:23:39,011 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-23T17:23:39,439 INFO [Time-limited test {}] client.ConnectionUtils(128): master/215bb12e999f:0 server-side Connection retries=45 2024-11-23T17:23:39,453 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:39,454 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:39,462 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T17:23:39,462 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:39,463 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T17:23:39,732 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-23T17:23:39,815 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-23T17:23:39,831 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-23T17:23:39,838 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T17:23:39,889 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 93458 (auto-detected) 2024-11-23T17:23:39,891 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-23T17:23:39,940 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39621 2024-11-23T17:23:39,995 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39621 connecting to ZooKeeper ensemble=127.0.0.1:57294 2024-11-23T17:23:40,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:396210x0, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T17:23:40,239 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39621-0x10168b8ee290000 connected 2024-11-23T17:23:40,415 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:40,419 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:40,445 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:23:40,451 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf, hbase.cluster.distributed=false 2024-11-23T17:23:40,524 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T17:23:40,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39621 2024-11-23T17:23:40,535 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39621 2024-11-23T17:23:40,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=39621 2024-11-23T17:23:40,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39621 2024-11-23T17:23:40,540 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39621 2024-11-23T17:23:40,684 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/215bb12e999f:0 server-side Connection retries=45 2024-11-23T17:23:40,686 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:40,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:40,687 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T17:23:40,687 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:40,688 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T17:23:40,691 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T17:23:40,699 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T17:23:40,701 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45467 2024-11-23T17:23:40,704 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45467 connecting to ZooKeeper ensemble=127.0.0.1:57294 2024-11-23T17:23:40,706 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:40,709 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:40,819 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454670x0, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T17:23:40,823 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:454670x0, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:23:40,829 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T17:23:40,832 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45467-0x10168b8ee290001 connected 2024-11-23T17:23:40,862 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, 
evictRemainRatio=0.5 2024-11-23T17:23:40,866 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T17:23:40,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T17:23:40,878 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45467 2024-11-23T17:23:40,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45467 2024-11-23T17:23:40,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45467 2024-11-23T17:23:40,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45467 2024-11-23T17:23:40,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45467 2024-11-23T17:23:40,919 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/215bb12e999f:0 server-side Connection retries=45 2024-11-23T17:23:40,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:40,919 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:40,920 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T17:23:40,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:40,920 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T17:23:40,920 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T17:23:40,921 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T17:23:40,925 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44667 2024-11-23T17:23:40,928 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44667 connecting to ZooKeeper ensemble=127.0.0.1:57294 2024-11-23T17:23:40,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:40,933 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do 
block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:40,959 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446670x0, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T17:23:40,960 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446670x0, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:23:40,960 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44667-0x10168b8ee290002 connected 2024-11-23T17:23:40,961 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T17:23:40,966 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T17:23:40,968 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T17:23:40,970 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T17:23:40,972 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44667 2024-11-23T17:23:40,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44667 2024-11-23T17:23:40,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44667 2024-11-23T17:23:40,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44667 2024-11-23T17:23:40,974 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44667 2024-11-23T17:23:41,000 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/215bb12e999f:0 server-side Connection retries=45 2024-11-23T17:23:41,001 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:41,001 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:41,001 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-23T17:23:41,001 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-23T17:23:41,001 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-23T17:23:41,001 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting 
hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-23T17:23:41,002 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-23T17:23:41,005 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41065 2024-11-23T17:23:41,008 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41065 connecting to ZooKeeper ensemble=127.0.0.1:57294 2024-11-23T17:23:41,010 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:41,014 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:41,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410650x0, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-23T17:23:41,034 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:410650x0, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:23:41,034 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41065-0x10168b8ee290003 connected 2024-11-23T17:23:41,035 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-23T17:23:41,038 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-23T17:23:41,041 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-23T17:23:41,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-23T17:23:41,060 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41065 2024-11-23T17:23:41,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41065 2024-11-23T17:23:41,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41065 2024-11-23T17:23:41,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41065 2024-11-23T17:23:41,076 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41065 2024-11-23T17:23:41,104 DEBUG [M:0;215bb12e999f:39621 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;215bb12e999f:39621 2024-11-23T17:23:41,121 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/215bb12e999f,39621,1732382619205 2024-11-23T17:23:41,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:23:41,151 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:23:41,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:23:41,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:23:41,172 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/215bb12e999f,39621,1732382619205 2024-11-23T17:23:41,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T17:23:41,224 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T17:23:41,224 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:41,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-23T17:23:41,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:41,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:41,228 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:41,232 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-23T17:23:41,240 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/215bb12e999f,39621,1732382619205 from backup master directory 2024-11-23T17:23:41,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:23:41,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/215bb12e999f,39621,1732382619205 2024-11-23T17:23:41,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:23:41,259 WARN [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T17:23:41,259 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=215bb12e999f,39621,1732382619205 2024-11-23T17:23:41,263 INFO [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-23T17:23:41,265 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:23:41,268 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-23T17:23:41,268 INFO [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-23T17:23:41,379 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/hbase.id] with ID: abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d 2024-11-23T17:23:41,380 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.tmp/hbase.id 2024-11-23T17:23:41,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741826_1002 (size=42) 2024-11-23T17:23:41,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741826_1002 (size=42) 2024-11-23T17:23:41,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741826_1002 (size=42) 2024-11-23T17:23:41,910 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.tmp/hbase.id]:[hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/hbase.id] 2024-11-23T17:23:41,967 INFO [master/215bb12e999f:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:23:41,974 INFO [master/215bb12e999f:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-23T17:23:42,003 INFO [master/215bb12e999f:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 27ms. 2024-11-23T17:23:42,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:42,054 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:42,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:42,054 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:42,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741827_1003 (size=196) 2024-11-23T17:23:42,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741827_1003 (size=196) 2024-11-23T17:23:42,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741827_1003 (size=196) 2024-11-23T17:23:42,121 INFO [master/215bb12e999f:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:23:42,123 INFO [master/215bb12e999f:0:becomeActiveMaster {}] 
region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-23T17:23:42,140 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-23T17:23:42,145 INFO [master/215bb12e999f:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:23:42,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741828_1004 (size=1189) 2024-11-23T17:23:42,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741828_1004 (size=1189) 2024-11-23T17:23:42,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741828_1004 (size=1189) 2024-11-23T17:23:42,228 INFO [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/data/master/store 2024-11-23T17:23:42,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741829_1005 (size=34) 2024-11-23T17:23:42,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741829_1005 (size=34) 2024-11-23T17:23:42,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741829_1005 (size=34) 2024-11-23T17:23:42,279 INFO [master/215bb12e999f:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-23T17:23:42,283 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:42,284 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T17:23:42,285 INFO [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:23:42,285 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:23:42,287 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T17:23:42,287 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:23:42,287 INFO [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:23:42,289 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732382622284Disabling compacts and flushes for region at 1732382622284Disabling writes for close at 1732382622287 (+3 ms)Writing region close event to WAL at 1732382622287Closed at 1732382622287 2024-11-23T17:23:42,292 WARN [master/215bb12e999f:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/data/master/store/.initializing 2024-11-23T17:23:42,292 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/WALs/215bb12e999f,39621,1732382619205 2024-11-23T17:23:42,303 INFO [master/215bb12e999f:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T17:23:42,319 INFO [master/215bb12e999f:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=215bb12e999f%2C39621%2C1732382619205, suffix=, logDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/WALs/215bb12e999f,39621,1732382619205, archiveDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/oldWALs, maxLogs=10 2024-11-23T17:23:42,344 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/WALs/215bb12e999f,39621,1732382619205/215bb12e999f%2C39621%2C1732382619205.1732382622324, exclude list is [], retry=0 2024-11-23T17:23:42,364 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46761,DS-7a832cf7-34ef-46bd-be01-46509a8fec53,DISK] 
2024-11-23T17:23:42,364 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-b2901eba-e22f-4426-936a-68cae471a098,DISK] 2024-11-23T17:23:42,364 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44591,DS-fb8b94d4-e3db-4dcc-bda0-734fb35ddb07,DISK] 2024-11-23T17:23:42,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-23T17:23:42,408 INFO [master/215bb12e999f:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/WALs/215bb12e999f,39621,1732382619205/215bb12e999f%2C39621%2C1732382619205.1732382622324 2024-11-23T17:23:42,410 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44373:44373),(127.0.0.1/127.0.0.1:41345:41345),(127.0.0.1/127.0.0.1:33645:33645)] 2024-11-23T17:23:42,410 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:23:42,411 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:42,416 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,418 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,514 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-23T17:23:42,520 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:42,524 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:23:42,525 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,532 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-23T17:23:42,532 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:42,537 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:23:42,538 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,543 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-23T17:23:42,544 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:42,546 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:23:42,546 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,551 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-23T17:23:42,552 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:42,555 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:23:42,556 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,571 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,573 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,582 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,583 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,589 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-23T17:23:42,598 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-23T17:23:42,614 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:23:42,616 INFO [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70885789, jitterRate=0.05628056824207306}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T17:23:42,626 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732382622438Initializing all the Stores at 1732382622441 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732382622441Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382622442 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382622443 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382622443Cleaning up temporary data from old regions at 1732382622583 (+140 ms)Region opened successfully at 1732382622626 (+43 ms) 2024-11-23T17:23:42,627 INFO [master/215bb12e999f:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-23T17:23:42,692 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b72206e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=215bb12e999f/172.17.0.2:0 2024-11-23T17:23:42,736 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-23T17:23:42,752 INFO [master/215bb12e999f:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-23T17:23:42,753 INFO [master/215bb12e999f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-23T17:23:42,756 INFO [master/215bb12e999f:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-23T17:23:42,761 INFO [master/215bb12e999f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 4 msec 2024-11-23T17:23:42,771 INFO [master/215bb12e999f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 9 msec 2024-11-23T17:23:42,772 INFO [master/215bb12e999f:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-23T17:23:42,806 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-23T17:23:42,815 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-23T17:23:42,853 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-23T17:23:42,856 INFO [master/215bb12e999f:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-23T17:23:42,858 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-23T17:23:42,885 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-23T17:23:42,888 INFO [master/215bb12e999f:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-23T17:23:42,891 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-23T17:23:42,956 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-23T17:23:42,959 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-23T17:23:42,980 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-23T17:23:42,998 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-23T17:23:43,005 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-23T17:23:43,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T17:23:43,016 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T17:23:43,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T17:23:43,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,016 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-23T17:23:43,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,026 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=215bb12e999f,39621,1732382619205, sessionid=0x10168b8ee290000, setting cluster-up flag (Was=false) 2024-11-23T17:23:43,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,065 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-23T17:23:43,100 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-23T17:23:43,102 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=215bb12e999f,39621,1732382619205 2024-11-23T17:23:43,128 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:43,163 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-23T17:23:43,165 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=215bb12e999f,39621,1732382619205 2024-11-23T17:23:43,174 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-23T17:23:43,209 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(746): ClusterId : abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d 2024-11-23T17:23:43,220 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T17:23:43,222 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(746): ClusterId : abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d 2024-11-23T17:23:43,222 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T17:23:43,237 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-23T17:23:43,244 INFO [master/215bb12e999f:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:23:43,244 INFO [master/215bb12e999f:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-11-23T17:23:43,260 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T17:23:43,260 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T17:23:43,260 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(746): ClusterId : abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d 2024-11-23T17:23:43,261 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-23T17:23:43,261 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T17:23:43,261 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T17:23:43,280 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T17:23:43,280 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T17:23:43,280 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-23T17:23:43,280 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-23T17:23:43,281 DEBUG [RS:0;215bb12e999f:45467 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cd326d7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=215bb12e999f/172.17.0.2:0 2024-11-23T17:23:43,281 DEBUG [RS:1;215bb12e999f:44667 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6df0423b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=215bb12e999f/172.17.0.2:0 2024-11-23T17:23:43,291 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-23T17:23:43,292 DEBUG [RS:2;215bb12e999f:41065 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7b4814, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=215bb12e999f/172.17.0.2:0 2024-11-23T17:23:43,321 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;215bb12e999f:44667 2024-11-23T17:23:43,326 INFO [RS:1;215bb12e999f:44667 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T17:23:43,326 INFO [RS:1;215bb12e999f:44667 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T17:23:43,326 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-23T17:23:43,327 INFO [RS:1;215bb12e999f:44667 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:23:43,327 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T17:23:43,330 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;215bb12e999f:45467 2024-11-23T17:23:43,331 INFO [RS:0;215bb12e999f:45467 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T17:23:43,331 INFO [RS:0;215bb12e999f:45467 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T17:23:43,332 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-23T17:23:43,332 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(2659): reportForDuty to master=215bb12e999f,39621,1732382619205 with port=44667, startcode=1732382620918 2024-11-23T17:23:43,332 INFO [RS:0;215bb12e999f:45467 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:23:43,332 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-23T17:23:43,334 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(2659): reportForDuty to master=215bb12e999f,39621,1732382619205 with port=45467, startcode=1732382620632 2024-11-23T17:23:43,338 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;215bb12e999f:41065 2024-11-23T17:23:43,338 INFO [RS:2;215bb12e999f:41065 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-23T17:23:43,338 INFO [RS:2;215bb12e999f:41065 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-23T17:23:43,338 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-23T17:23:43,338 INFO [RS:2;215bb12e999f:41065 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:23:43,338 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-23T17:23:43,339 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(2659): reportForDuty to master=215bb12e999f,39621,1732382619205 with port=41065, startcode=1732382621000 2024-11-23T17:23:43,348 DEBUG [RS:0;215bb12e999f:45467 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T17:23:43,348 DEBUG [RS:2;215bb12e999f:41065 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T17:23:43,348 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-23T17:23:43,351 DEBUG [RS:1;215bb12e999f:44667 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-23T17:23:43,360 INFO [master/215bb12e999f:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-23T17:23:43,369 INFO [master/215bb12e999f:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-23T17:23:43,375 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 215bb12e999f,39621,1732382619205 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-23T17:23:43,383 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/215bb12e999f:0, corePoolSize=5, maxPoolSize=5 2024-11-23T17:23:43,383 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/215bb12e999f:0, corePoolSize=5, maxPoolSize=5 2024-11-23T17:23:43,383 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/215bb12e999f:0, corePoolSize=5, maxPoolSize=5 2024-11-23T17:23:43,383 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/215bb12e999f:0, corePoolSize=5, maxPoolSize=5 2024-11-23T17:23:43,383 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/215bb12e999f:0, corePoolSize=10, maxPoolSize=10 2024-11-23T17:23:43,383 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,384 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/215bb12e999f:0, corePoolSize=2, maxPoolSize=2 2024-11-23T17:23:43,384 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,394 INFO [master/215bb12e999f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732382653394 2024-11-23T17:23:43,395 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39659, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T17:23:43,395 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43229, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T17:23:43,395 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44827, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-23T17:23:43,398 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-23T17:23:43,399 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T17:23:43,399 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-23T17:23:43,399 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-23T17:23:43,402 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T17:23:43,407 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:43,407 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T17:23:43,408 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-23T17:23:43,409 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-23T17:23:43,409 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-23T17:23:43,410 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-23T17:23:43,411 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T17:23:43,412 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T17:23:43,417 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-23T17:23:43,423 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-23T17:23:43,425 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-23T17:23:43,425 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-23T17:23:43,428 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-23T17:23:43,428 INFO [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-23T17:23:43,430 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/215bb12e999f:0:becomeActiveMaster-HFileCleaner.large.0-1732382623430,5,FailOnTimeoutGroup] 2024-11-23T17:23:43,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741831_1007 (size=1321) 2024-11-23T17:23:43,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741831_1007 (size=1321) 2024-11-23T17:23:43,432 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/215bb12e999f:0:becomeActiveMaster-HFileCleaner.small.0-1732382623431,5,FailOnTimeoutGroup] 2024-11-23T17:23:43,432 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,432 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-23T17:23:43,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741831_1007 (size=1321) 2024-11-23T17:23:43,433 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T17:23:43,433 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T17:23:43,433 WARN [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T17:23:43,433 WARN [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T17:23:43,434 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-23T17:23:43,434 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,434 WARN [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-23T17:23:43,434 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-23T17:23:43,434 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-23T17:23:43,435 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:23:43,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741832_1008 (size=32) 2024-11-23T17:23:43,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741832_1008 (size=32) 2024-11-23T17:23:43,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741832_1008 (size=32) 2024-11-23T17:23:43,455 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:43,459 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T17:23:43,463 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, 
incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T17:23:43,463 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:43,464 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:23:43,465 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T17:23:43,468 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T17:23:43,468 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:43,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:23:43,470 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T17:23:43,476 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T17:23:43,476 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:43,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:23:43,481 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T17:23:43,484 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T17:23:43,484 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:43,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:23:43,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T17:23:43,487 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740 2024-11-23T17:23:43,488 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740 2024-11-23T17:23:43,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T17:23:43,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T17:23:43,495 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-23T17:23:43,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T17:23:43,503 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:23:43,504 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74135357, jitterRate=0.1047029048204422}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T17:23:43,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732382623456Initializing all the Stores at 1732382623458 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732382623458Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732382623459 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382623459Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732382623459Cleaning up temporary data from old regions at 1732382623493 (+34 ms)Region opened successfully at 1732382623509 (+16 ms) 2024-11-23T17:23:43,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T17:23:43,510 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T17:23:43,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T17:23:43,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T17:23:43,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T17:23:43,512 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T17:23:43,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732382623510Disabling compacts and flushes for region at 1732382623510Disabling writes for close at 
1732382623510Writing region close event to WAL at 1732382623512 (+2 ms)Closed at 1732382623512 2024-11-23T17:23:43,517 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T17:23:43,517 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-23T17:23:43,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-23T17:23:43,535 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(2659): reportForDuty to master=215bb12e999f,39621,1732382619205 with port=45467, startcode=1732382620632 2024-11-23T17:23:43,535 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(2659): reportForDuty to master=215bb12e999f,39621,1732382619205 with port=41065, startcode=1732382621000 2024-11-23T17:23:43,535 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T17:23:43,535 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(2659): reportForDuty to master=215bb12e999f,39621,1732382619205 with port=44667, startcode=1732382620918 2024-11-23T17:23:43,537 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 215bb12e999f,45467,1732382620632 2024-11-23T17:23:43,539 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-23T17:23:43,539 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] master.ServerManager(517): Registering regionserver=215bb12e999f,45467,1732382620632 2024-11-23T17:23:43,545 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 215bb12e999f,44667,1732382620918 2024-11-23T17:23:43,546 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] master.ServerManager(517): Registering regionserver=215bb12e999f,44667,1732382620918 2024-11-23T17:23:43,546 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:23:43,546 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37605 2024-11-23T17:23:43,546 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T17:23:43,549 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:23:43,549 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37605 2024-11-23T17:23:43,549 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] 
master.ServerManager(363): Checking decommissioned status of RegionServer 215bb12e999f,41065,1732382621000 2024-11-23T17:23:43,549 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T17:23:43,549 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] master.ServerManager(517): Registering regionserver=215bb12e999f,41065,1732382621000 2024-11-23T17:23:43,552 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:23:43,553 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37605 2024-11-23T17:23:43,553 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-23T17:23:43,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T17:23:43,688 DEBUG [RS:0;215bb12e999f:45467 {}] zookeeper.ZKUtil(111): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/215bb12e999f,45467,1732382620632 2024-11-23T17:23:43,688 WARN [RS:0;215bb12e999f:45467 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T17:23:43,688 INFO [RS:0;215bb12e999f:45467 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:23:43,689 DEBUG [RS:2;215bb12e999f:41065 {}] zookeeper.ZKUtil(111): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/215bb12e999f,41065,1732382621000 2024-11-23T17:23:43,689 DEBUG [RS:1;215bb12e999f:44667 {}] zookeeper.ZKUtil(111): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/215bb12e999f,44667,1732382620918 2024-11-23T17:23:43,689 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,45467,1732382620632 2024-11-23T17:23:43,689 WARN [RS:2;215bb12e999f:41065 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-23T17:23:43,689 WARN [RS:1;215bb12e999f:44667 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-23T17:23:43,689 INFO [RS:1;215bb12e999f:44667 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:23:43,689 INFO [RS:2;215bb12e999f:41065 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:23:43,689 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,44667,1732382620918 2024-11-23T17:23:43,689 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,41065,1732382621000 2024-11-23T17:23:43,690 WARN [215bb12e999f:39621 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-23T17:23:43,691 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [215bb12e999f,41065,1732382621000] 2024-11-23T17:23:43,691 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [215bb12e999f,44667,1732382620918] 2024-11-23T17:23:43,691 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [215bb12e999f,45467,1732382620632] 2024-11-23T17:23:43,738 INFO [RS:2;215bb12e999f:41065 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T17:23:43,742 INFO [RS:0;215bb12e999f:45467 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T17:23:43,768 INFO [RS:1;215bb12e999f:44667 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-23T17:23:43,796 INFO [RS:1;215bb12e999f:44667 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T17:23:43,797 INFO [RS:0;215bb12e999f:45467 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T17:23:43,798 INFO [RS:2;215bb12e999f:41065 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-23T17:23:43,805 INFO [RS:0;215bb12e999f:45467 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T17:23:43,806 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,806 INFO [RS:1;215bb12e999f:44667 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T17:23:43,807 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T17:23:43,807 INFO [RS:2;215bb12e999f:41065 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-23T17:23:43,807 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,809 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T17:23:43,809 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T17:23:43,812 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-23T17:23:43,817 INFO [RS:0;215bb12e999f:45467 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T17:23:43,817 INFO [RS:1;215bb12e999f:44667 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T17:23:43,822 INFO [RS:2;215bb12e999f:41065 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-23T17:23:43,837 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,838 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,838 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-23T17:23:43,838 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,838 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,839 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,839 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,839 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,839 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/215bb12e999f:0, corePoolSize=2, maxPoolSize=2 2024-11-23T17:23:43,839 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,839 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,839 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,840 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,840 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,840 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,840 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:23:43,840 DEBUG [RS:0;215bb12e999f:45467 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/215bb12e999f:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:23:43,840 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-23T17:23:43,841 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,841 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,841 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,841 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,842 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,842 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/215bb12e999f:0, corePoolSize=2, maxPoolSize=2 2024-11-23T17:23:43,842 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,842 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,842 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,842 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,843 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,843 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,843 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:23:43,843 DEBUG [RS:1;215bb12e999f:44667 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/215bb12e999f:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:23:43,846 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,846 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,846 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,847 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service 
name=RS_CLOSE_META-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,847 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/215bb12e999f:0, corePoolSize=2, maxPoolSize=2 2024-11-23T17:23:43,847 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,847 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,848 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,848 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,848 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,848 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/215bb12e999f:0, corePoolSize=1, maxPoolSize=1 2024-11-23T17:23:43,849 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:23:43,849 DEBUG [RS:2;215bb12e999f:41065 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/215bb12e999f:0, corePoolSize=3, maxPoolSize=3 2024-11-23T17:23:43,857 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,858 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,858 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,858 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,858 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,858 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,45467,1732382620632-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T17:23:43,865 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,865 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,865 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-23T17:23:43,866 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,866 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,866 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,44667,1732382620918-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T17:23:43,888 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T17:23:43,891 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,45467,1732382620632-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,892 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,892 INFO [RS:0;215bb12e999f:45467 {}] regionserver.Replication(171): 215bb12e999f,45467,1732382620632 started 2024-11-23T17:23:43,895 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,895 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,895 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,895 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,896 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,896 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,41065,1732382621000-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T17:23:43,902 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T17:23:43,903 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,44667,1732382620918-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,903 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,904 INFO [RS:1;215bb12e999f:44667 {}] regionserver.Replication(171): 215bb12e999f,44667,1732382620918 started 2024-11-23T17:23:43,934 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T17:23:43,935 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(1482): Serving as 215bb12e999f,44667,1732382620918, RpcServer on 215bb12e999f/172.17.0.2:44667, sessionid=0x10168b8ee290002 2024-11-23T17:23:43,936 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T17:23:43,937 DEBUG [RS:1;215bb12e999f:44667 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 215bb12e999f,44667,1732382620918 2024-11-23T17:23:43,939 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,940 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(1482): Serving as 215bb12e999f,45467,1732382620632, RpcServer on 215bb12e999f/172.17.0.2:45467, sessionid=0x10168b8ee290001 2024-11-23T17:23:43,940 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T17:23:43,940 DEBUG [RS:0;215bb12e999f:45467 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 215bb12e999f,45467,1732382620632 2024-11-23T17:23:43,941 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '215bb12e999f,45467,1732382620632' 2024-11-23T17:23:43,941 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T17:23:43,942 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T17:23:43,943 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T17:23:43,943 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T17:23:43,943 DEBUG [RS:0;215bb12e999f:45467 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 215bb12e999f,45467,1732382620632 2024-11-23T17:23:43,943 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '215bb12e999f,45467,1732382620632' 2024-11-23T17:23:43,943 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T17:23:43,944 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T17:23:43,945 DEBUG [RS:0;215bb12e999f:45467 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T17:23:43,946 INFO [RS:0;215bb12e999f:45467 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T17:23:43,946 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '215bb12e999f,44667,1732382620918' 2024-11-23T17:23:43,946 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T17:23:43,946 INFO [RS:0;215bb12e999f:45467 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-23T17:23:43,947 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T17:23:43,948 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T17:23:43,948 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T17:23:43,948 DEBUG [RS:1;215bb12e999f:44667 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 215bb12e999f,44667,1732382620918 2024-11-23T17:23:43,948 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '215bb12e999f,44667,1732382620918' 2024-11-23T17:23:43,952 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T17:23:43,949 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-23T17:23:43,954 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,41065,1732382621000-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,955 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:43,955 INFO [RS:2;215bb12e999f:41065 {}] regionserver.Replication(171): 215bb12e999f,41065,1732382621000 started 2024-11-23T17:23:43,964 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T17:23:43,966 DEBUG [RS:1;215bb12e999f:44667 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T17:23:43,966 INFO [RS:1;215bb12e999f:44667 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T17:23:43,966 INFO [RS:1;215bb12e999f:44667 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-23T17:23:43,987 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-23T17:23:43,988 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(1482): Serving as 215bb12e999f,41065,1732382621000, RpcServer on 215bb12e999f/172.17.0.2:41065, sessionid=0x10168b8ee290003 2024-11-23T17:23:43,988 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-23T17:23:43,988 DEBUG [RS:2;215bb12e999f:41065 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 215bb12e999f,41065,1732382621000 2024-11-23T17:23:43,988 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '215bb12e999f,41065,1732382621000' 2024-11-23T17:23:43,989 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-23T17:23:43,990 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-23T17:23:43,991 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-23T17:23:43,991 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-23T17:23:43,991 DEBUG [RS:2;215bb12e999f:41065 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 215bb12e999f,41065,1732382621000 2024-11-23T17:23:43,991 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '215bb12e999f,41065,1732382621000' 2024-11-23T17:23:43,991 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-23T17:23:43,994 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-23T17:23:44,002 DEBUG [RS:2;215bb12e999f:41065 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-23T17:23:44,002 INFO [RS:2;215bb12e999f:41065 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-23T17:23:44,002 INFO [RS:2;215bb12e999f:41065 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-23T17:23:44,054 INFO [RS:0;215bb12e999f:45467 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T17:23:44,060 INFO [RS:0;215bb12e999f:45467 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=215bb12e999f%2C45467%2C1732382620632, suffix=, logDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,45467,1732382620632, archiveDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/oldWALs, maxLogs=32 2024-11-23T17:23:44,067 INFO [RS:1;215bb12e999f:44667 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T17:23:44,074 INFO [RS:1;215bb12e999f:44667 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=215bb12e999f%2C44667%2C1732382620918, suffix=, logDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,44667,1732382620918, archiveDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/oldWALs, maxLogs=32 2024-11-23T17:23:44,090 DEBUG [RS:0;215bb12e999f:45467 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,45467,1732382620632/215bb12e999f%2C45467%2C1732382620632.1732382624063, exclude list is [], retry=0 2024-11-23T17:23:44,104 INFO [RS:2;215bb12e999f:41065 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-23T17:23:44,106 DEBUG [RS:1;215bb12e999f:44667 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,44667,1732382620918/215bb12e999f%2C44667%2C1732382620918.1732382624079, exclude list is [], retry=0 2024-11-23T17:23:44,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46761,DS-7a832cf7-34ef-46bd-be01-46509a8fec53,DISK] 2024-11-23T17:23:44,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44591,DS-fb8b94d4-e3db-4dcc-bda0-734fb35ddb07,DISK] 2024-11-23T17:23:44,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-b2901eba-e22f-4426-936a-68cae471a098,DISK] 2024-11-23T17:23:44,116 INFO [RS:2;215bb12e999f:41065 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=215bb12e999f%2C41065%2C1732382621000, suffix=, logDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,41065,1732382621000, archiveDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/oldWALs, maxLogs=32 2024-11-23T17:23:44,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:46761,DS-7a832cf7-34ef-46bd-be01-46509a8fec53,DISK] 2024-11-23T17:23:44,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44591,DS-fb8b94d4-e3db-4dcc-bda0-734fb35ddb07,DISK] 2024-11-23T17:23:44,134 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-b2901eba-e22f-4426-936a-68cae471a098,DISK] 2024-11-23T17:23:44,230 DEBUG [RS:2;215bb12e999f:41065 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,41065,1732382621000/215bb12e999f%2C41065%2C1732382621000.1732382624118, exclude list is [], retry=0 2024-11-23T17:23:44,231 INFO [RS:0;215bb12e999f:45467 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,45467,1732382620632/215bb12e999f%2C45467%2C1732382620632.1732382624063 2024-11-23T17:23:44,236 DEBUG [RS:0;215bb12e999f:45467 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44373:44373),(127.0.0.1/127.0.0.1:41345:41345),(127.0.0.1/127.0.0.1:33645:33645)] 2024-11-23T17:23:44,269 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-b2901eba-e22f-4426-936a-68cae471a098,DISK] 2024-11-23T17:23:44,271 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44591,DS-fb8b94d4-e3db-4dcc-bda0-734fb35ddb07,DISK] 2024-11-23T17:23:44,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46761,DS-7a832cf7-34ef-46bd-be01-46509a8fec53,DISK] 2024-11-23T17:23:44,272 INFO [RS:1;215bb12e999f:44667 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,44667,1732382620918/215bb12e999f%2C44667%2C1732382620918.1732382624079 2024-11-23T17:23:44,296 DEBUG [RS:1;215bb12e999f:44667 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44373:44373),(127.0.0.1/127.0.0.1:41345:41345),(127.0.0.1/127.0.0.1:33645:33645)] 2024-11-23T17:23:44,353 INFO [RS:2;215bb12e999f:41065 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,41065,1732382621000/215bb12e999f%2C41065%2C1732382621000.1732382624118 2024-11-23T17:23:44,360 DEBUG [RS:2;215bb12e999f:41065 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41345:41345),(127.0.0.1/127.0.0.1:33645:33645),(127.0.0.1/127.0.0.1:44373:44373)] 2024-11-23T17:23:44,443 DEBUG [215bb12e999f:39621 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-11-23T17:23:44,453 DEBUG [215bb12e999f:39621 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:23:44,505 DEBUG [215bb12e999f:39621 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:23:44,505 DEBUG [215bb12e999f:39621 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:23:44,505 DEBUG [215bb12e999f:39621 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:23:44,505 DEBUG [215bb12e999f:39621 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:23:44,505 DEBUG [215bb12e999f:39621 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:23:44,505 DEBUG [215bb12e999f:39621 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:23:44,506 INFO [215bb12e999f:39621 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:23:44,506 INFO [215bb12e999f:39621 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:23:44,506 INFO [215bb12e999f:39621 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:23:44,506 DEBUG [215bb12e999f:39621 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:23:44,527 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:23:44,545 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 215bb12e999f,44667,1732382620918, state=OPENING 2024-11-23T17:23:44,563 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-23T17:23:44,576 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:44,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:44,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:44,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:44,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:23:44,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:23:44,581 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:23:44,587 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:23:44,591 DEBUG [PEWorker-3 
{}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-23T17:23:44,594 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:23:44,820 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:23:44,828 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58753, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:23:44,872 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-23T17:23:44,872 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-23T17:23:44,873 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-23T17:23:44,889 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=215bb12e999f%2C44667%2C1732382620918.meta, suffix=.meta, logDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,44667,1732382620918, archiveDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/oldWALs, maxLogs=32 2024-11-23T17:23:44,921 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,44667,1732382620918/215bb12e999f%2C44667%2C1732382620918.meta.1732382624892.meta, exclude list is [], retry=0 2024-11-23T17:23:44,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46761,DS-7a832cf7-34ef-46bd-be01-46509a8fec53,DISK] 2024-11-23T17:23:44,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45743,DS-b2901eba-e22f-4426-936a-68cae471a098,DISK] 2024-11-23T17:23:44,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44591,DS-fb8b94d4-e3db-4dcc-bda0-734fb35ddb07,DISK] 2024-11-23T17:23:44,957 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/WALs/215bb12e999f,44667,1732382620918/215bb12e999f%2C44667%2C1732382620918.meta.1732382624892.meta 2024-11-23T17:23:44,967 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41345:41345),(127.0.0.1/127.0.0.1:33645:33645),(127.0.0.1/127.0.0.1:44373:44373)] 2024-11-23T17:23:44,968 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:23:44,969 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-23T17:23:44,971 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:23:44,972 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-23T17:23:44,974 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-23T17:23:44,976 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
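The entries above show the meta region registering the AccessControlService coprocessor and loading org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from the table descriptor. As a rough illustration only (the table name "demo" and the helper class below are hypothetical, not part of this run), a region coprocessor can be attached to a table through its descriptor along these lines:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorTableSketch {
        // Build a descriptor that carries the MultiRowMutationEndpoint coprocessor,
        // the same endpoint the log shows being loaded from the HTD of hbase:meta.
        public static TableDescriptor build() throws IOException {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo"))               // hypothetical table name
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
        }
    }

System-wide coprocessors such as AccessController are normally enabled through configuration (e.g. the hbase.coprocessor.region.classes key) rather than per-table descriptors, which is why the log shows it loaded as a "System coprocessor".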
2024-11-23T17:23:44,987 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-23T17:23:44,989 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:44,990 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-23T17:23:44,990 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-23T17:23:44,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-23T17:23:45,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-23T17:23:45,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:45,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:23:45,029 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-23T17:23:45,038 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-23T17:23:45,038 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:45,045 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:23:45,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-23T17:23:45,057 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-23T17:23:45,057 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:45,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-23T17:23:45,059 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-23T17:23:45,061 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-23T17:23:45,061 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:45,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
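The repeated CompactionConfiguration(183) lines spell out the per-store compaction knobs this run uses (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2). A minimal sketch of how such values map onto configuration keys, assuming the standard hbase.hstore.compaction.* property names (verify against the reference guide for your HBase version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
        // Mirror the values printed by CompactionConfiguration(183) in this run.
        public static Configuration tuned() {
            Configuration conf = HBaseConfiguration.create();
            conf.setInt("hbase.hstore.compaction.min", 3);                         // minFilesToCompact:3
            conf.setInt("hbase.hstore.compaction.max", 10);                        // maxFilesToCompact:10
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                  // ratio 1.200000
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);  // minCompactSize:128 MB
            return conf;
        }
    }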
2024-11-23T17:23:45,063 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-23T17:23:45,065 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740 2024-11-23T17:23:45,069 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740 2024-11-23T17:23:45,074 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-23T17:23:45,074 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-23T17:23:45,076 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-23T17:23:45,080 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-23T17:23:45,083 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63615751, jitterRate=-0.05205143988132477}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-23T17:23:45,083 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-23T17:23:45,087 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732382624991Writing region info on filesystem at 1732382624992 (+1 ms)Initializing all the Stores at 1732382624995 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732382624995Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732382624995Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382624995Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732382624995Cleaning up temporary data from old regions at 1732382625074 (+79 ms)Running coprocessor post-open hooks at 1732382625083 (+9 ms)Region opened successfully at 1732382625087 (+4 ms) 2024-11-23T17:23:45,099 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732382624782 2024-11-23T17:23:45,116 DEBUG [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-23T17:23:45,116 INFO [RS_OPEN_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-23T17:23:45,131 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:23:45,136 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 215bb12e999f,44667,1732382620918, state=OPEN 2024-11-23T17:23:45,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:23:45,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:23:45,152 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:23:45,153 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:23:45,153 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:23:45,153 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=215bb12e999f,44667,1732382620918 2024-11-23T17:23:45,155 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:23:45,155 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-23T17:23:45,156 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-23T17:23:45,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-23T17:23:45,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=215bb12e999f,44667,1732382620918 in 559 msec 2024-11-23T17:23:45,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-23T17:23:45,188 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.6450 sec 2024-11-23T17:23:45,191 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-23T17:23:45,191 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-23T17:23:45,239 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:23:45,241 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:23:45,281 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:23:45,285 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46017, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:23:45,346 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 2.0920 sec 2024-11-23T17:23:45,346 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732382625346, completionTime=-1 2024-11-23T17:23:45,350 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-23T17:23:45,350 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
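Just above, InitMetaProcedure completes and PEWorker-2 fetches the meta region location from the connection registry ([region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,...]). A client can ask the same question through the public API; the following standalone class is a minimal sketch of mine, not part of the test code:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationSketch {
        public static void main(String[] args) throws IOException {
            // Look up which region server currently hosts hbase:meta,,1.
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
                HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
                System.out.println("hbase:meta is served by " + loc.getServerName());
            }
        }
    }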
2024-11-23T17:23:45,390 INFO [master/215bb12e999f:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-23T17:23:45,390 INFO [master/215bb12e999f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732382685390 2024-11-23T17:23:45,391 INFO [master/215bb12e999f:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732382745391 2024-11-23T17:23:45,391 INFO [master/215bb12e999f:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 40 msec 2024-11-23T17:23:45,397 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:23:45,412 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,39621,1732382619205-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:45,412 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,39621,1732382619205-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:45,413 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,39621,1732382619205-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:45,415 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-215bb12e999f:39621, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:45,416 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:45,419 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:45,426 DEBUG [master/215bb12e999f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T17:23:45,462 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 4.202sec 2024-11-23T17:23:45,465 INFO [master/215bb12e999f:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-23T17:23:45,467 INFO [master/215bb12e999f:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-23T17:23:45,468 INFO [master/215bb12e999f:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-23T17:23:45,469 INFO [master/215bb12e999f:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
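The ChoreService(168) lines show the master scheduling its periodic chores (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, ...) with their periods. A rough sketch of the same mechanism using the ScheduledChore/ChoreService classes follows; the chore name, period and the trivial Stoppable are mine, and the exact constructor overloads may differ between HBase versions:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) {
            // Simple Stoppable so the chore can be cancelled; real daemons pass the server itself.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped = false;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            ScheduledChore demoChore = new ScheduledChore("DemoChore", stopper, 60, 0, TimeUnit.SECONDS) {
                @Override protected void chore() {
                    System.out.println("periodic work runs here");
                }
            };
            ChoreService service = new ChoreService("demo");   // thread-name prefix, hypothetical
            service.scheduleChore(demoChore);
            // ... on shutdown: stopper.stop("done"); service.shutdown();
        }
    }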
2024-11-23T17:23:45,469 INFO [master/215bb12e999f:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-23T17:23:45,470 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,39621,1732382619205-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-23T17:23:45,471 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,39621,1732382619205-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-23T17:23:45,511 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T17:23:45,511 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 215bb12e999f,39621,1732382619205 2024-11-23T17:23:45,515 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@c3a388d 2024-11-23T17:23:45,517 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T17:23:45,520 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58891, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T17:23:45,528 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-23T17:23:45,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-23T17:23:45,558 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:23:45,559 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:45,559 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-23T17:23:45,565 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:23:45,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:23:45,578 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b1b1162, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:23:45,586 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-23T17:23:45,586 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-23T17:23:45,593 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:23:45,609 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:23:45,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741837_1013 (size=349) 2024-11-23T17:23:45,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741837_1013 (size=349) 2024-11-23T17:23:45,669 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 41264850465b123187f03a78b2521b39, NAME => 'hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:23:45,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741837_1013 (size=349) 2024-11-23T17:23:45,679 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:23:45,683 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:23:45,683 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:23:45,688 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d4ff098, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:23:45,688 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:23:45,692 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:23:45,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:23:45,709 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:23:45,710 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40168, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:23:45,715 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70bf0d0d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:23:45,716 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:23:45,739 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:23:45,740 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:23:45,747 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:23:45,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=215bb12e999f,39621,1732382619205 2024-11-23T17:23:45,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-11-23T17:23:45,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/test.cache.data in system properties and HBase conf 2024-11-23T17:23:45,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.tmp.dir in system properties and HBase conf 2024-11-23T17:23:45,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir in system properties and HBase conf 2024-11-23T17:23:45,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-23T17:23:45,755 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-23T17:23:45,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-23T17:23:45,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-23T17:23:45,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-23T17:23:45,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-23T17:23:45,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T17:23:45,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-23T17:23:45,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-23T17:23:45,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-23T17:23:45,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T17:23:45,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-23T17:23:45,757 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/nfs.dump.dir in system properties and HBase conf 2024-11-23T17:23:45,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir in system properties and HBase conf 2024-11-23T17:23:45,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-23T17:23:45,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-23T17:23:45,758 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-23T17:23:45,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741838_1014 (size=36) 2024-11-23T17:23:45,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741838_1014 (size=36) 2024-11-23T17:23:45,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741838_1014 (size=36) 2024-11-23T17:23:45,799 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:45,800 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 41264850465b123187f03a78b2521b39, disabling compactions & flushes 2024-11-23T17:23:45,800 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 2024-11-23T17:23:45,800 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 2024-11-23T17:23:45,800 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. after waiting 0 ms 2024-11-23T17:23:45,800 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 2024-11-23T17:23:45,800 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 2024-11-23T17:23:45,800 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 41264850465b123187f03a78b2521b39: Waiting for close lock at 1732382625799Disabling compacts and flushes for region at 1732382625800 (+1 ms)Disabling writes for close at 1732382625800Writing region close event to WAL at 1732382625800Closed at 1732382625800 2024-11-23T17:23:45,806 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:23:45,815 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1732382625808"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382625808"}]},"ts":"1732382625808"} 2024-11-23T17:23:45,824 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
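The table being created in this stretch is hbase:acl with a single in-memory family 'l' and VERSIONS => '1' (the create request appears a little earlier via HMaster$4(2454)). For illustration only, an equivalent create issued through the client Admin API might look like the sketch below; the table name acl_demo is hypothetical, since hbase:acl itself is created by the master rather than by user code:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws IOException {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                admin.createTable(TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("acl_demo"))          // hypothetical table name
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("l"))
                        .setMaxVersions(1)                              // VERSIONS => '1'
                        .setInMemory(true)                              // IN_MEMORY => 'true'
                        .setBlocksize(8 * 1024)                         // BLOCKSIZE => '8192 B (8KB)'
                        .build())
                    .build());
            }
        }
    }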
2024-11-23T17:23:45,828 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:23:45,910 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382625828"}]},"ts":"1732382625828"} 2024-11-23T17:23:45,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:23:45,931 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-23T17:23:45,932 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:23:45,943 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:23:45,943 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:23:45,944 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:23:45,944 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:23:45,944 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:23:45,944 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:23:45,944 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:23:45,944 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:23:45,944 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:23:45,944 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:23:45,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=41264850465b123187f03a78b2521b39, ASSIGN}] 2024-11-23T17:23:45,954 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=41264850465b123187f03a78b2521b39, ASSIGN 2024-11-23T17:23:45,966 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=41264850465b123187f03a78b2521b39, ASSIGN; state=OFFLINE, location=215bb12e999f,41065,1732382621000; forceNewPlan=false, retain=false 2024-11-23T17:23:46,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741839_1015 (size=592039) 2024-11-23T17:23:46,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741839_1015 (size=592039) 2024-11-23T17:23:46,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741839_1015 (size=592039) 2024-11-23T17:23:46,125 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 1 
regions. 1 retained the pre-restart assignment. 2024-11-23T17:23:46,128 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=41264850465b123187f03a78b2521b39, regionState=OPENING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:23:46,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=41264850465b123187f03a78b2521b39, ASSIGN because future has completed 2024-11-23T17:23:46,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 41264850465b123187f03a78b2521b39, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:23:46,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741840_1016 (size=1663647) 2024-11-23T17:23:46,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741840_1016 (size=1663647) 2024-11-23T17:23:46,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741840_1016 (size=1663647) 2024-11-23T17:23:46,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:23:46,363 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:23:46,406 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55283, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:23:46,450 INFO [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 2024-11-23T17:23:46,451 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 41264850465b123187f03a78b2521b39, NAME => 'hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39.', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:23:46,452 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. service=AccessControlService 2024-11-23T17:23:46,452 INFO [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
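The balancer lines above ("server 0 has 0 regions", "Reassigned 1 regions. 1 retained the pre-restart assignment.") are the master counting regions per live server before picking a target for the new hbase:acl region. A test or operator can read the same per-server counts through ClusterMetrics; this is a minimal sketch under that assumption, class name mine:

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RegionCountSketch {
        public static void main(String[] args) throws IOException {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics =
                    admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
                // Print how many regions each live region server currently hosts.
                metrics.getLiveServerMetrics().forEach((serverName, serverMetrics) ->
                    System.out.println(serverName + " hosts "
                        + serverMetrics.getRegionMetrics().size() + " region(s)"));
            }
        }
    }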
2024-11-23T17:23:46,452 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,453 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:46,453 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,453 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,461 INFO [StoreOpener-41264850465b123187f03a78b2521b39-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,464 INFO [StoreOpener-41264850465b123187f03a78b2521b39-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 41264850465b123187f03a78b2521b39 columnFamilyName l 2024-11-23T17:23:46,464 DEBUG [StoreOpener-41264850465b123187f03a78b2521b39-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:46,466 INFO [StoreOpener-41264850465b123187f03a78b2521b39-1 {}] regionserver.HStore(327): Store=41264850465b123187f03a78b2521b39/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:23:46,467 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,476 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,478 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,479 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,479 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,484 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,497 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:23:46,503 INFO [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 41264850465b123187f03a78b2521b39; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61519924, jitterRate=-0.08328169584274292}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:23:46,503 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 41264850465b123187f03a78b2521b39 2024-11-23T17:23:46,508 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 41264850465b123187f03a78b2521b39: Running coprocessor pre-open hook at 1732382626453Writing region info on filesystem at 1732382626453Initializing all the Stores at 1732382626460 (+7 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732382626460Cleaning up temporary data from old regions at 1732382626479 (+19 ms)Running coprocessor post-open hooks at 1732382626503 (+24 ms)Region opened successfully at 1732382626508 (+5 ms) 2024-11-23T17:23:46,513 INFO [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., pid=6, masterSystemTime=1732382626361 2024-11-23T17:23:46,524 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 2024-11-23T17:23:46,524 INFO [RS_OPEN_PRIORITY_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 
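The open journal above reports the acl region coming up under SteppingSplitPolicy with a ConstantSizeRegionSplitPolicy base (desiredMaxFileSize=61519924, jitterRate=-0.083...). When a specific split behaviour is wanted, it can be pinned per table on the descriptor; the sketch below assumes a hypothetical table demo_split with a fixed 64 MB split threshold:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
        // Pin the split policy and max store file size on the table descriptor
        // instead of relying on the cluster-wide defaults seen in this log.
        public static TableDescriptor build() {
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("demo_split"))        // hypothetical table name
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("d"))
                .setRegionSplitPolicyClassName(
                    "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
                .setMaxFileSize(64L * 1024 * 1024)                  // split once a store reaches ~64 MB
                .build();
        }
    }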
2024-11-23T17:23:46,533 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=41264850465b123187f03a78b2521b39, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:23:46,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 41264850465b123187f03a78b2521b39, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:23:46,574 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-23T17:23:46,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 41264850465b123187f03a78b2521b39, server=215bb12e999f,41065,1732382621000 in 397 msec 2024-11-23T17:23:46,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-23T17:23:46,584 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=41264850465b123187f03a78b2521b39, ASSIGN in 624 msec 2024-11-23T17:23:46,605 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:23:46,606 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382626606"}]},"ts":"1732382626606"} 2024-11-23T17:23:46,615 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-23T17:23:46,619 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:23:46,625 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 1.0850 sec 2024-11-23T17:23:46,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-23T17:23:46,744 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-23T17:23:46,764 DEBUG [master/215bb12e999f:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-23T17:23:46,766 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-23T17:23:46,766 INFO [master/215bb12e999f:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=215bb12e999f,39621,1732382619205-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-23T17:23:48,391 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:48,456 WARN [Thread-384 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:48,880 INFO [Thread-384 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:23:48,880 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-23T17:23:48,882 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:23:48,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:23:48,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:23:48,950 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T17:23:48,962 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:49,000 INFO [Thread-384 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:23:49,000 INFO [Thread-384 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:23:49,004 INFO [Thread-384 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-23T17:23:49,017 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@482db9e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:23:49,019 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@294ca8f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T17:23:49,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@305c2704{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:23:49,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2bdadcc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T17:23:49,457 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-11-23T17:23:49,464 INFO [Thread-384 
{}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-11-23T17:23:49,464 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-23T17:23:49,467 INFO [Thread-384 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-23T17:23:49,649 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:23:49,734 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-23T17:23:49,748 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-23T17:23:49,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:23:49,807 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-23T17:23:49,808 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T17:23:49,808 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-23T17:23:49,830 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-23T17:23:49,830 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-23T17:23:49,836 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:23:49,836 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-23T17:23:49,836 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-23T17:23:49,836 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-23T17:23:49,838 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:23:49,838 INFO 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-23T17:23:49,838 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T17:23:49,838 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-23T17:23:49,839 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-23T17:23:49,839 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-23T17:23:50,562 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:23:50,812 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:23:52,013 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:23:52,070 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c56c60{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir/jetty-localhost-42885-hadoop-yarn-common-3_4_1_jar-_-any-7620391381454569204/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-23T17:23:52,073 INFO [Thread-384 {}] server.AbstractConnector(333): Started ServerConnector@118b729d{HTTP/1.1, (http/1.1)}{localhost:42885} 2024-11-23T17:23:52,073 INFO [Thread-384 {}] server.Server(415): Started @25770ms 2024-11-23T17:23:52,076 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@68bb6bdc{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir/jetty-localhost-39251-hadoop-yarn-common-3_4_1_jar-_-any-583153978730632449/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-23T17:23:52,084 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@388f6f6{HTTP/1.1, (http/1.1)}{localhost:39251} 2024-11-23T17:23:52,084 INFO [Time-limited test {}] server.Server(415): Started @25781ms 2024-11-23T17:23:52,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741841_1017 (size=5) 2024-11-23T17:23:52,546 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741841_1017 (size=5) 2024-11-23T17:23:52,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741841_1017 (size=5) 2024-11-23T17:23:55,268 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-23T17:23:55,280 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:55,428 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-23T17:23:55,434 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:23:55,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:23:55,533 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:23:55,533 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T17:23:55,542 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:55,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ada4a0a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:23:55,553 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@274b914{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T17:23:55,702 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-23T17:23:55,702 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-23T17:23:55,702 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-23T17:23:55,702 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-23T17:23:55,758 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:23:55,844 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:23:56,223 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:23:56,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@129022cc{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir/jetty-localhost-35813-hadoop-yarn-common-3_4_1_jar-_-any-16247368074284875871/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T17:23:56,278 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@35820016{HTTP/1.1, (http/1.1)}{localhost:35813} 2024-11-23T17:23:56,279 INFO [Time-limited test {}] server.Server(415): Started @29974ms 2024-11-23T17:23:57,104 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-23T17:23:57,118 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:57,185 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-23T17:23:57,187 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-23T17:23:57,228 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-23T17:23:57,229 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-23T17:23:57,229 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-23T17:23:57,249 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-23T17:23:57,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21927721{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,AVAILABLE} 2024-11-23T17:23:57,253 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bd14f10{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-23T17:23:57,367 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-23T17:23:57,367 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-23T17:23:57,368 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-23T17:23:57,369 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-23T17:23:57,395 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:23:57,431 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:23:57,635 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-23T17:23:57,643 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@1bc03170{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/java.io.tmpdir/jetty-localhost-36349-hadoop-yarn-common-3_4_1_jar-_-any-1842250675276994559/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T17:23:57,650 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33ae5ff8{HTTP/1.1, (http/1.1)}{localhost:36349} 2024-11-23T17:23:57,650 INFO [Time-limited test {}] server.Server(415): Started @31346ms 2024-11-23T17:23:57,716 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-23T17:23:57,718 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:23:57,808 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=715, OpenFileDescriptor=771, MaxFileDescriptor=1048576, SystemLoadAverage=754, ProcessCount=13, AvailableMemoryMB=1777 2024-11-23T17:23:57,811 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=715 is superior to 500 2024-11-23T17:23:57,816 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T17:23:57,823 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 215bb12e999f,39621,1732382619205 2024-11-23T17:23:57,824 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@66fddda0 2024-11-23T17:23:57,824 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T17:23:57,833 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46528, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T17:23:57,834 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:23:57,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:23:57,867 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:23:57,869 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:57,870 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-11-23T17:23:57,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:23:57,874 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:23:57,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741842_1018 (size=422) 2024-11-23T17:23:57,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:23:57,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741842_1018 (size=422) 2024-11-23T17:23:57,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741842_1018 (size=422) 2024-11-23T17:23:58,009 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 28218a36ba9ccc9cb78380286e1f08cb, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:23:58,012 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3aa5942783f70257c637cc1abea423ca, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:23:58,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741843_1019 (size=83) 2024-11-23T17:23:58,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741843_1019 (size=83) 
2024-11-23T17:23:58,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741843_1019 (size=83) 2024-11-23T17:23:58,151 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:58,151 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 28218a36ba9ccc9cb78380286e1f08cb, disabling compactions & flushes 2024-11-23T17:23:58,151 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 2024-11-23T17:23:58,151 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 2024-11-23T17:23:58,151 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. after waiting 0 ms 2024-11-23T17:23:58,151 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 2024-11-23T17:23:58,151 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 
2024-11-23T17:23:58,152 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 28218a36ba9ccc9cb78380286e1f08cb: Waiting for close lock at 1732382638151Disabling compacts and flushes for region at 1732382638151Disabling writes for close at 1732382638151Writing region close event to WAL at 1732382638151Closed at 1732382638151 2024-11-23T17:23:58,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741844_1020 (size=83) 2024-11-23T17:23:58,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741844_1020 (size=83) 2024-11-23T17:23:58,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741844_1020 (size=83) 2024-11-23T17:23:58,175 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:58,175 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 3aa5942783f70257c637cc1abea423ca, disabling compactions & flushes 2024-11-23T17:23:58,175 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:23:58,175 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:23:58,175 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. after waiting 0 ms 2024-11-23T17:23:58,175 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:23:58,175 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 
2024-11-23T17:23:58,175 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3aa5942783f70257c637cc1abea423ca: Waiting for close lock at 1732382638175Disabling compacts and flushes for region at 1732382638175Disabling writes for close at 1732382638175Writing region close event to WAL at 1732382638175Closed at 1732382638175 2024-11-23T17:23:58,182 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:23:58,183 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732382638182"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382638182"}]},"ts":"1732382638182"} 2024-11-23T17:23:58,183 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732382638182"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382638182"}]},"ts":"1732382638182"} 2024-11-23T17:23:58,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:23:58,317 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:23:58,323 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:23:58,323 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382638323"}]},"ts":"1732382638323"} 2024-11-23T17:23:58,337 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-23T17:23:58,338 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:23:58,344 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:23:58,344 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:23:58,344 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:23:58,344 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:23:58,344 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:23:58,344 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:23:58,344 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:23:58,344 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:23:58,344 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:23:58,344 DEBUG [PEWorker-1 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:23:58,345 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=28218a36ba9ccc9cb78380286e1f08cb, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3aa5942783f70257c637cc1abea423ca, ASSIGN}] 2024-11-23T17:23:58,349 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3aa5942783f70257c637cc1abea423ca, ASSIGN 2024-11-23T17:23:58,352 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3aa5942783f70257c637cc1abea423ca, ASSIGN; state=OFFLINE, location=215bb12e999f,41065,1732382621000; forceNewPlan=false, retain=false 2024-11-23T17:23:58,352 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=28218a36ba9ccc9cb78380286e1f08cb, ASSIGN 2024-11-23T17:23:58,354 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=28218a36ba9ccc9cb78380286e1f08cb, ASSIGN; state=OFFLINE, location=215bb12e999f,45467,1732382620632; forceNewPlan=false, retain=false 2024-11-23T17:23:58,504 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:23:58,505 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=28218a36ba9ccc9cb78380286e1f08cb, regionState=OPENING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:23:58,512 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=3aa5942783f70257c637cc1abea423ca, regionState=OPENING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:23:58,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:23:58,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=28218a36ba9ccc9cb78380286e1f08cb, ASSIGN because future has completed 2024-11-23T17:23:58,529 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:23:58,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3aa5942783f70257c637cc1abea423ca, ASSIGN because future has completed 2024-11-23T17:23:58,552 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3aa5942783f70257c637cc1abea423ca, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:23:58,717 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:23:58,777 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57881, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:23:58,791 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:23:58,792 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 3aa5942783f70257c637cc1abea423ca, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:23:58,792 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. service=AccessControlService 2024-11-23T17:23:58,792 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:23:58,793 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,793 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:58,793 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,793 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,814 INFO [StoreOpener-3aa5942783f70257c637cc1abea423ca-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,833 INFO [StoreOpener-3aa5942783f70257c637cc1abea423ca-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3aa5942783f70257c637cc1abea423ca columnFamilyName cf 2024-11-23T17:23:58,833 DEBUG [StoreOpener-3aa5942783f70257c637cc1abea423ca-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:58,840 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 
2024-11-23T17:23:58,841 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 28218a36ba9ccc9cb78380286e1f08cb, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:23:58,841 INFO [StoreOpener-3aa5942783f70257c637cc1abea423ca-1 {}] regionserver.HStore(327): Store=3aa5942783f70257c637cc1abea423ca/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:23:58,841 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,843 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. service=AccessControlService 2024-11-23T17:23:58,844 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:23:58,844 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,844 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:23:58,845 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,845 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,846 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,849 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,851 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,851 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 
3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,864 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,883 INFO [StoreOpener-28218a36ba9ccc9cb78380286e1f08cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,893 INFO [StoreOpener-28218a36ba9ccc9cb78380286e1f08cb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28218a36ba9ccc9cb78380286e1f08cb columnFamilyName cf 2024-11-23T17:23:58,894 DEBUG [StoreOpener-28218a36ba9ccc9cb78380286e1f08cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:23:58,896 INFO [StoreOpener-28218a36ba9ccc9cb78380286e1f08cb-1 {}] regionserver.HStore(327): Store=28218a36ba9ccc9cb78380286e1f08cb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:23:58,896 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,904 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,906 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,908 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:23:58,910 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 3aa5942783f70257c637cc1abea423ca; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67006997, jitterRate=-0.0015179365873336792}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:23:58,911 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:23:58,912 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 3aa5942783f70257c637cc1abea423ca: Running coprocessor pre-open hook at 1732382638793Writing region info on filesystem at 1732382638793Initializing all the Stores at 1732382638812 (+19 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382638812Cleaning up temporary data from old regions at 1732382638851 (+39 ms)Running coprocessor post-open hooks at 1732382638911 (+60 ms)Region opened successfully at 1732382638912 (+1 ms) 2024-11-23T17:23:58,913 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,913 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,929 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,954 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca., pid=11, masterSystemTime=1732382638732 2024-11-23T17:23:58,961 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:23:58,964 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 28218a36ba9ccc9cb78380286e1f08cb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60957172, jitterRate=-0.09166735410690308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:23:58,964 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:23:58,965 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 28218a36ba9ccc9cb78380286e1f08cb: Running coprocessor pre-open hook at 1732382638845Writing region info on filesystem at 1732382638845Initializing all the Stores at 1732382638853 (+8 
ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382638853Cleaning up temporary data from old regions at 1732382638913 (+60 ms)Running coprocessor post-open hooks at 1732382638964 (+51 ms)Region opened successfully at 1732382638965 (+1 ms) 2024-11-23T17:23:58,969 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb., pid=10, masterSystemTime=1732382638716 2024-11-23T17:23:58,979 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:23:58,979 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:23:58,986 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=3aa5942783f70257c637cc1abea423ca, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:23:58,990 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 2024-11-23T17:23:58,990 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 
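The two "Opened ... SteppingSplitPolicy" entries above report desiredMaxFileSize=67006997 (jitterRate=-0.0015179365873336792) and desiredMaxFileSize=60957172 (jitterRate=-0.09166735410690308). Assuming the configured region max file size for this test run is 64 MiB (67108864 bytes) -- an assumption, since that setting never appears in this log -- both values are consistent with a simple multiplicative jitter. The Java sketch below is only a back-of-the-envelope reconstruction of that arithmetic, not HBase's actual ConstantSizeRegionSplitPolicy code.

// Hypothetical reconstruction of the jittered split size seen in the log.
// BASE is an assumption (64 MiB); only desiredMaxFileSize and jitterRate are logged.
public class SplitSizeJitterSketch {
    static final long BASE = 64L * 1024 * 1024; // 67108864 bytes, assumed

    static long desiredMaxFileSize(double jitterRate) {
        // desired = base + base * jitterRate, rounded to the nearest byte
        return BASE + Math.round(BASE * jitterRate);
    }

    public static void main(String[] args) {
        System.out.println(desiredMaxFileSize(-0.0015179365873336792)); // 67006997, as logged
        System.out.println(desiredMaxFileSize(-0.09166735410690308));   // 60957172, as logged
    }
}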
2024-11-23T17:23:58,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3aa5942783f70257c637cc1abea423ca, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:23:58,995 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=28218a36ba9ccc9cb78380286e1f08cb, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:23:59,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:23:59,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:23:59,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3aa5942783f70257c637cc1abea423ca, ASSIGN in 684 msec 2024-11-23T17:23:59,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-11-23T17:23:59,061 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 3aa5942783f70257c637cc1abea423ca, server=215bb12e999f,41065,1732382621000 in 461 msec 2024-11-23T17:23:59,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-11-23T17:23:59,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb, server=215bb12e999f,45467,1732382620632 in 489 msec 2024-11-23T17:23:59,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-23T17:23:59,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=28218a36ba9ccc9cb78380286e1f08cb, ASSIGN in 704 msec 2024-11-23T17:23:59,065 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:23:59,066 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382639065"}]},"ts":"1732382639065"} 2024-11-23T17:23:59,074 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-23T17:23:59,077 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:23:59,106 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey 
testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-23T17:23:59,130 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:23:59,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:23:59,137 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50445, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:23:59,155 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:23:59,155 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:23:59,156 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:23:59,190 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43997, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-23T17:23:59,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:23:59,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:23:59,200 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34219, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-23T17:23:59,202 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:23:59,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T17:23:59,269 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:59,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T17:23:59,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:59,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T17:23:59,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:59,277 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-23T17:23:59,277 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:23:59,461 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:23:59,462 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:23:59,462 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:23:59,473 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:23:59,498 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 1.6400 sec 2024-11-23T17:23:59,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:23:59,807 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-23T17:24:00,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-23T17:24:00,041 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion 
completed 2024-11-23T17:24:00,043 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSplitRegion get assigned. Timeout = 60000ms 2024-11-23T17:24:00,044 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:24:00,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned to meta. Checking AM states. 2024-11-23T17:24:00,055 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:24:00,058 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned. 2024-11-23T17:24:00,062 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:24:00,088 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:24:00,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382640088 (current time:1732382640088). 2024-11-23T17:24:00,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:24:00,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-23T17:24:00,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:24:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bcf16e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:24:00,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:24:00,099 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:24:00,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:24:00,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:24:00,100 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68beb4e8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:00,100 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:24:00,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:24:00,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:00,106 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:24:00,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30a9a096, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:00,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:24:00,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:24:00,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:00,130 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53724, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:24:00,134 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 
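The "Writing permission with rowKey ... jenkins: RWXCA" and "Read acl: entry[...], kv [jenkins: RWXCA]" entries above come from the master persisting the table creator's full rights in hbase:acl during CreateTableProcedure's post-operation step. For reference only, an explicit client-side grant of the same RWXCA set would look roughly like the sketch below; the connection setup is a placeholder, and only the table name and user come from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
    public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create(); // placeholder configuration
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
            // RWXCA for user "jenkins" on the whole test table (no family/qualifier restriction)
            AccessControlClient.grant(conn,
                TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
                "jenkins", null, null,
                Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
                Permission.Action.CREATE, Permission.Action.ADMIN);
        }
    }
}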
2024-11-23T17:24:00,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:24:00,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:00,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d24a0cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:24:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:24:00,160 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
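The HBaseTestingUtil(3046/3100/3120) entries above show the test blocking, with a 60000 ms timeout, until both regions of the new table are assigned and reflected in hbase:meta. A minimal sketch of that call, assuming a HBaseTestingUtil instance named util already backs this mini cluster:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

class WaitForAssignmentSketch {
    // util is assumed to be the HBaseTestingUtil driving the mini cluster in this log
    static void waitForTestTable(HBaseTestingUtil util) throws IOException {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        // Blocks until all regions of the table are assigned (default timeout matches
        // the 60000 ms reported in the log above).
        util.waitUntilAllRegionsAssigned(table);
    }
}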
2024-11-23T17:24:00,164 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:24:00,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:24:00,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:24:00,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fec3563, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:00,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:24:00,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:24:00,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:00,178 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:24:00,189 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2017c2af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:24:00,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:24:00,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:00,197 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53728, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:24:00,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:24:00,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:00,220 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35340, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-23T17:24:00,231 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 2024-11-23T17:24:00,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:24:00,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:00,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:00,232 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
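The call stack above ends in SnapshotDescriptionUtils.writeAclToSnapshotDescription: before the snapshot is taken, the master reads the table's ACL entries (the same "jenkins: RWXCA" record) so they can travel with the snapshot description. Reading those permissions from the client side can be done with AccessControlClient as well; a sketch, with the connection handling again assumed:

import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

class ReadAclSketch {
    static void dumpTableAcls(Connection conn) throws Throwable {
        // Table-name regex matching only the test table from this log; per the
        // "Read acl" entries above, a single jenkins RWXCA entry is expected.
        List<UserPermission> perms = AccessControlClient.getUserPermissions(
            conn, "testtb-testExportFileSystemStateWithSplitRegion");
        perms.forEach(System.out::println);
    }
}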
2024-11-23T17:24:00,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:24:00,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:24:00,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:24:00,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-23T17:24:00,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:24:00,269 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:24:00,284 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:24:00,312 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:24:00,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:24:00,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741845_1021 (size=215) 2024-11-23T17:24:00,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741845_1021 (size=215) 2024-11-23T17:24:00,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741845_1021 (size=215) 2024-11-23T17:24:00,411 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:24:00,415 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3aa5942783f70257c637cc1abea423ca}] 2024-11-23T17:24:00,423 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:24:00,436 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:24:00,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:24:00,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-23T17:24:00,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:24:00,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 3aa5942783f70257c637cc1abea423ca: 2024-11-23T17:24:00,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T17:24:00,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-23T17:24:00,605 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:00,610 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:24:00,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 2024-11-23T17:24:00,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 28218a36ba9ccc9cb78380286e1f08cb: 2024-11-23T17:24:00,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 
for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T17:24:00,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:00,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:24:00,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:24:00,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:24:00,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741846_1022 (size=86) 2024-11-23T17:24:00,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741846_1022 (size=86) 2024-11-23T17:24:00,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741846_1022 (size=86) 2024-11-23T17:24:00,757 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 
2024-11-23T17:24:00,760 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-23T17:24:00,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-23T17:24:00,765 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:24:00,765 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:24:00,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb in 360 msec 2024-11-23T17:24:00,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741847_1023 (size=86) 2024-11-23T17:24:00,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741847_1023 (size=86) 2024-11-23T17:24:00,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741847_1023 (size=86) 2024-11-23T17:24:00,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:24:01,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 
2024-11-23T17:24:01,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-23T17:24:01,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-23T17:24:01,215 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:24:01,215 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:24:01,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=12 2024-11-23T17:24:01,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3aa5942783f70257c637cc1abea423ca in 807 msec 2024-11-23T17:24:01,235 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:24:01,241 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:24:01,251 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:24:01,251 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:01,258 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:01,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741848_1024 (size=597) 2024-11-23T17:24:01,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741848_1024 (size=597) 2024-11-23T17:24:01,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741848_1024 (size=597) 2024-11-23T17:24:01,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:24:01,440 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:24:01,500 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:24:01,506 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:01,522 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:24:01,522 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-23T17:24:01,535 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 1.2780 sec 2024-11-23T17:24:01,734 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-11-23T17:24:02,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-23T17:24:02,428 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:24:02,470 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='033e37298954fec94110b03c313f31db0', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:24:02,486 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='1adb35dfba431a4ae7e544bbf0a6f0d88', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 
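The "Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed" entry a few lines above marks the end of the emptySnaptb0 request submitted at 17:24:00 and driven through the SNAPSHOT_* states of SnapshotProcedure pid=12. On the client side, a FLUSH-type snapshot like this is normally a single Admin call; a minimal sketch, assuming an open Connection named conn:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

class SnapshotSketch {
    static void takeEmptySnapshot(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
            // Blocks until the master-side procedure reports SUCCESS, which is what the
            // repeated "Checking to see if procedure is done pid=12" polling above reflects.
            // The plain two-argument form requests the default FLUSH snapshot, matching
            // the "type=FLUSH" in the snapshot description logged above.
            admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion",
                TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"));
        }
    }
}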
2024-11-23T17:24:02,511 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:02,514 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:02,529 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:24:02,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35426, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:24:02,543 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45467 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:24:02,575 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41065 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:24:02,596 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:24:02,609 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:02,610 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 2024-11-23T17:24:02,614 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:24:02,627 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:24:02,670 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:24:02,740 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:24:02,768 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:24:02,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382642768 (current time:1732382642768). 
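The two HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") correspond to the test loading rows with write-ahead logging turned off. With the client API that behaviour comes from setting SKIP_WAL durability on the mutation; a minimal sketch, where the row key, qualifier, and value are arbitrary placeholders (the real rows, such as 033e3729..., come from the test's own data generator):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

class SkipWalPutSketch {
    static void putWithoutWal(Connection conn) throws IOException {
        TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Table table = conn.getTable(name)) {
            Put put = new Put(Bytes.toBytes("row-example"));                      // placeholder row key
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")); // family "cf" as in the log
            put.setDurability(Durability.SKIP_WAL);                               // produces the warning seen above
            table.put(put);
        }
    }
}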
2024-11-23T17:24:02,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:24:02,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-23T17:24:02,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:24:02,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d19339f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:02,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:24:02,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:24:02,774 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:24:02,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:24:02,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:24:02,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ecb4b83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:02,777 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:24:02,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:24:02,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:02,780 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42600, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:24:02,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@344ba78e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:02,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:24:02,792 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:24:02,793 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:02,801 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38194, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:24:02,803 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 2024-11-23T17:24:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:24:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:02,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:02,806 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:24:02,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@793ab056, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:02,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:24:02,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:24:02,808 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:24:02,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:24:02,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:24:02,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cae844f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:02,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:24:02,809 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:24:02,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:02,811 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42606, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:24:02,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f36c819, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:02,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:24:02,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:24:02,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:02,819 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38210, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-11-23T17:24:02,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:24:02,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:02,826 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55242, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:24:02,828 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 2024-11-23T17:24:02,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:24:02,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:02,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:02,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:24:02,830 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:24:02,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:24:02,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:24:02,849 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:24:02,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-23T17:24:02,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:24:02,860 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:24:02,886 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:24:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:24:03,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741849_1025 (size=210) 2024-11-23T17:24:03,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741849_1025 (size=210) 2024-11-23T17:24:03,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741849_1025 (size=210) 2024-11-23T17:24:03,067 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:24:03,068 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3aa5942783f70257c637cc1abea423ca}] 2024-11-23T17:24:03,075 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:24:03,076 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:24:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:24:03,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-23T17:24:03,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-23T17:24:03,240 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:24:03,246 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 3aa5942783f70257c637cc1abea423ca 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-23T17:24:03,253 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 
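[editorial note] The entries above show the master storing SnapshotProcedure pid=15 for a FLUSH-type snapshot and the region servers beginning their per-region flushes. As a hedged illustration only (not the test's actual code; cluster configuration on the classpath is assumed), a client could request the same snapshot through the public Admin API roughly like this:

    // SnapshotRequestSketch.java -- illustrative sketch, assuming a reachable cluster
    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws IOException {
        // Configuration is read from hbase-site.xml on the classpath (assumed).
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Snapshot and table names are the ones that appear in this log;
          // SnapshotType.FLUSH matches the "type=FLUSH" shown above.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
              SnapshotType.FLUSH);
        }
      }
    }
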
2024-11-23T17:24:03,253 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 28218a36ba9ccc9cb78380286e1f08cb 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-23T17:24:03,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:24:03,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/.tmp/cf/ae85b8a437be4d00bf70e5a049ed0eaf is 71, key is 0124be457198d69c808897d84a2abb7f/cf:q/1732382642543/Put/seqid=0 2024-11-23T17:24:03,570 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/.tmp/cf/8c47d000c6bc4a349cd0d4d26b4572fd is 71, key is 11c5dbeba143c2a7729d612185f61146/cf:q/1732382642575/Put/seqid=0 2024-11-23T17:24:03,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741850_1026 (size=8256) 2024-11-23T17:24:03,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741850_1026 (size=8256) 2024-11-23T17:24:03,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741850_1026 (size=8256) 2024-11-23T17:24:03,785 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/.tmp/cf/8c47d000c6bc4a349cd0d4d26b4572fd 2024-11-23T17:24:03,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741851_1027 (size=5354) 2024-11-23T17:24:03,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741851_1027 (size=5354) 2024-11-23T17:24:03,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741851_1027 (size=5354) 2024-11-23T17:24:03,845 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/.tmp/cf/ae85b8a437be4d00bf70e5a049ed0eaf 2024-11-23T17:24:04,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:24:04,333 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/.tmp/cf/8c47d000c6bc4a349cd0d4d26b4572fd as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/cf/8c47d000c6bc4a349cd0d4d26b4572fd 2024-11-23T17:24:04,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/.tmp/cf/ae85b8a437be4d00bf70e5a049ed0eaf as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/cf/ae85b8a437be4d00bf70e5a049ed0eaf 2024-11-23T17:24:04,389 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/cf/ae85b8a437be4d00bf70e5a049ed0eaf, entries=4, sequenceid=6, filesize=5.2 K 2024-11-23T17:24:04,392 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/cf/8c47d000c6bc4a349cd0d4d26b4572fd, entries=46, sequenceid=6, filesize=8.1 K 2024-11-23T17:24:04,416 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 3aa5942783f70257c637cc1abea423ca in 1164ms, sequenceid=6, compaction requested=false 2024-11-23T17:24:04,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 3aa5942783f70257c637cc1abea423ca: 2024-11-23T17:24:04,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-11-23T17:24:04,416 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 28218a36ba9ccc9cb78380286e1f08cb in 1145ms, sequenceid=6, compaction requested=false 2024-11-23T17:24:04,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 28218a36ba9ccc9cb78380286e1f08cb: 2024-11-23T17:24:04,419 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T17:24:04,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:04,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:24:04,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/cf/ae85b8a437be4d00bf70e5a049ed0eaf] hfiles 2024-11-23T17:24:04,417 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:04,424 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:24:04,424 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/cf/8c47d000c6bc4a349cd0d4d26b4572fd] hfiles 2024-11-23T17:24:04,426 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/cf/8c47d000c6bc4a349cd0d4d26b4572fd for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:04,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/cf/ae85b8a437be4d00bf70e5a049ed0eaf for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:04,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741852_1028 (size=125) 2024-11-23T17:24:04,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741852_1028 (size=125) 2024-11-23T17:24:04,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741852_1028 (size=125) 2024-11-23T17:24:04,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 
2024-11-23T17:24:04,609 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-23T17:24:04,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-23T17:24:04,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:24:04,610 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:24:04,635 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb in 1.5540 sec 2024-11-23T17:24:04,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741853_1029 (size=125) 2024-11-23T17:24:04,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741853_1029 (size=125) 2024-11-23T17:24:04,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741853_1029 (size=125) 2024-11-23T17:24:04,813 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:24:05,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 
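[editorial note] Once both SnapshotRegionProcedures report back, the entries that follow show the master consolidating the manifest and completing pid=15, after which the client's SNAPSHOT operation is reported as done. As a hedged fragment (the 'admin' handle is assumed to be obtained as in the earlier sketch), a caller could confirm the finished snapshot is visible:

    // Illustrative check only; 'admin' is an org.apache.hadoop.hbase.client.Admin.
    boolean present = admin.listSnapshots().stream()
        .anyMatch(d -> d.getName().equals("snaptb0-testExportFileSystemStateWithSplitRegion"));
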
2024-11-23T17:24:05,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-23T17:24:05,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-23T17:24:05,101 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:24:05,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:24:05,133 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=17, resume processing ppid=15 2024-11-23T17:24:05,133 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3aa5942783f70257c637cc1abea423ca in 2.0490 sec 2024-11-23T17:24:05,134 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:24:05,137 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:24:05,149 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:24:05,149 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:05,155 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:05,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:24:05,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741854_1030 (size=675) 2024-11-23T17:24:05,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741854_1030 (size=675) 2024-11-23T17:24:05,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741854_1030 (size=675) 2024-11-23T17:24:05,327 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, 
state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:24:05,385 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:24:05,397 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:05,405 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:24:05,405 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-23T17:24:05,415 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 2.5680 sec 2024-11-23T17:24:07,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-23T17:24:07,269 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:24:07,313 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:24:07,321 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:24:07,324 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55248, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:24:07,325 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:24:07,325 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41065 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-23T17:24:07,328 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38218, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:24:07,329 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-23T17:24:07,343 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35432, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:24:07,348 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45467 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-23T17:24:07,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:24:07,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:07,361 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:24:07,361 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:24:07,361 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-11-23T17:24:07,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T17:24:07,369 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:24:07,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T17:24:07,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741855_1031 (size=390) 2024-11-23T17:24:07,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741855_1031 (size=390) 2024-11-23T17:24:07,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741855_1031 (size=390) 2024-11-23T17:24:07,517 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4482cba3a53c5628a185540ec8894b00, NAME => 
'testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:24:07,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741856_1032 (size=75) 2024-11-23T17:24:07,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741856_1032 (size=75) 2024-11-23T17:24:07,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741856_1032 (size=75) 2024-11-23T17:24:07,672 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:24:07,673 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 4482cba3a53c5628a185540ec8894b00, disabling compactions & flushes 2024-11-23T17:24:07,673 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 2024-11-23T17:24:07,673 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 2024-11-23T17:24:07,673 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. after waiting 0 ms 2024-11-23T17:24:07,673 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 2024-11-23T17:24:07,673 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 
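[editorial note] The create request logged above (master.HMaster$4(2454)) carries a descriptor with a single 'cf' family, VERSIONS => '1', BLOOMFILTER => 'ROW' and a 64 KB block size. A hedged sketch of issuing an equivalent request through the client API ('admin' as in the earlier sketch; this is not the test's own code):

    // Illustrative fragment; classes from org.apache.hadoop.hbase.client.*,
    // org.apache.hadoop.hbase.regionserver.BloomType, org.apache.hadoop.hbase.util.Bytes.
    admin.createTable(TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                  // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
            .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
            .build())
        .build());                              // drives the CreateTableProcedure (pid=18 above)
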
2024-11-23T17:24:07,673 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4482cba3a53c5628a185540ec8894b00: Waiting for close lock at 1732382647672Disabling compacts and flushes for region at 1732382647672Disabling writes for close at 1732382647673 (+1 ms)Writing region close event to WAL at 1732382647673Closed at 1732382647673 2024-11-23T17:24:07,683 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:24:07,684 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732382647683"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382647683"}]},"ts":"1732382647683"} 2024-11-23T17:24:07,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T17:24:07,695 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-23T17:24:07,705 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:24:07,705 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382647705"}]},"ts":"1732382647705"} 2024-11-23T17:24:07,714 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-23T17:24:07,714 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:24:07,724 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:24:07,725 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:24:07,725 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:24:07,725 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:24:07,725 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:24:07,725 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:24:07,725 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:24:07,725 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:24:07,725 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:24:07,725 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:24:07,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=4482cba3a53c5628a185540ec8894b00, ASSIGN}] 
2024-11-23T17:24:07,737 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=4482cba3a53c5628a185540ec8894b00, ASSIGN 2024-11-23T17:24:07,744 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=4482cba3a53c5628a185540ec8894b00, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:24:07,895 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-23T17:24:07,896 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=4482cba3a53c5628a185540ec8894b00, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:24:07,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=4482cba3a53c5628a185540ec8894b00, ASSIGN because future has completed 2024-11-23T17:24:07,948 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4482cba3a53c5628a185540ec8894b00, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:24:08,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T17:24:08,086 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:24:08,180 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 2024-11-23T17:24:08,180 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 4482cba3a53c5628a185540ec8894b00, NAME => 'testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00.', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:24:08,181 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. service=AccessControlService 2024-11-23T17:24:08,181 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
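[editorial note] The region open above registers the AccessControlService coprocessor (AccessController), which is also what later writes and caches the 'jenkins: RWXCA' ACL entries seen further down. As a hedged sketch (these are standard HBase configuration keys, but the exact wiring this test uses is assumed, not shown in the log), the coprocessor is typically enabled via configuration before the cluster starts:

    // Hedged sketch of the configuration that makes AccessController load on regions and master.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.security.authorization", true);
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
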
2024-11-23T17:24:08,182 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,182 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:24:08,182 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,182 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,186 INFO [StoreOpener-4482cba3a53c5628a185540ec8894b00-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,192 INFO [StoreOpener-4482cba3a53c5628a185540ec8894b00-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4482cba3a53c5628a185540ec8894b00 columnFamilyName cf 2024-11-23T17:24:08,192 DEBUG [StoreOpener-4482cba3a53c5628a185540ec8894b00-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:24:08,216 INFO [StoreOpener-4482cba3a53c5628a185540ec8894b00-1 {}] regionserver.HStore(327): Store=4482cba3a53c5628a185540ec8894b00/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:24:08,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,244 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,246 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,247 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,247 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,253 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,266 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:24:08,267 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 4482cba3a53c5628a185540ec8894b00; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74905848, jitterRate=0.11618411540985107}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:24:08,268 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:08,269 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 4482cba3a53c5628a185540ec8894b00: Running coprocessor pre-open hook at 1732382648183Writing region info on filesystem at 1732382648183Initializing all the Stores at 1732382648186 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382648186Cleaning up temporary data from old regions at 1732382648247 (+61 ms)Running coprocessor post-open hooks at 1732382648268 (+21 ms)Region opened successfully at 1732382648269 (+1 ms) 2024-11-23T17:24:08,274 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00., pid=20, masterSystemTime=1732382648132 2024-11-23T17:24:08,285 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 2024-11-23T17:24:08,285 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 
2024-11-23T17:24:08,289 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=4482cba3a53c5628a185540ec8894b00, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:24:08,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4482cba3a53c5628a185540ec8894b00, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:24:08,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-11-23T17:24:08,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 4482cba3a53c5628a185540ec8894b00, server=215bb12e999f,44667,1732382620918 in 358 msec 2024-11-23T17:24:08,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-23T17:24:08,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=4482cba3a53c5628a185540ec8894b00, ASSIGN in 588 msec 2024-11-23T17:24:08,330 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:24:08,331 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382648331"}]},"ts":"1732382648331"} 2024-11-23T17:24:08,335 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-23T17:24:08,338 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:24:08,338 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-23T17:24:08,349 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:24:08,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:24:08,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:24:08,408 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:24:08,408 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, 
quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:24:08,431 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:24:08,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:24:08,432 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:24:08,433 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:24:08,439 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:24:08,440 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:24:08,444 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:24:08,447 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:24:08,452 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 1.0800 sec 2024-11-23T17:24:08,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-23T17:24:08,519 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:24:08,520 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:24:08,525 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-23T17:24:09,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:09,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-23T17:24:12,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741857_1033 (size=134217728) 2024-11-23T17:24:12,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741857_1033 (size=134217728) 2024-11-23T17:24:12,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741857_1033 (size=134217728) 2024-11-23T17:24:13,710 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:24:14,016 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-11-23T17:24:15,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741858_1034 (size=134217728) 2024-11-23T17:24:15,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741858_1034 (size=134217728) 2024-11-23T17:24:15,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741858_1034 (size=134217728) 2024-11-23T17:24:16,263 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1732382648541/Put/seqid=0 2024-11-23T17:24:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741859_1035 (size=51979256) 2024-11-23T17:24:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741859_1035 (size=51979256) 2024-11-23T17:24:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741859_1035 (size=51979256) 2024-11-23T17:24:16,272 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cc9acfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:16,272 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:24:16,273 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ConnectionRegistryService, sasl=false 2024-11-23T17:24:16,276 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:24:16,276 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:24:16,277 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:24:16,277 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6df3952f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:16,277 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:24:16,278 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:24:16,308 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:16,310 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44058, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:24:16,311 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ae324c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:16,312 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:24:16,314 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:24:16,315 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:16,317 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54450, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:24:16,345 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:37605/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
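The WARN entry above records BulkLoadHFilesTool validating the 320,414,712-byte HFile under output/cf/ and flagging it as large enough to cause oversplitting. As a hedged illustration only (the table name and the <dir>/<family>/<hfile> layout are taken from the log; the test itself drives BulkLoadHFilesTool, while this sketch goes through the public BulkLoadHFiles facade, which may differ in minor details), a client-side bulk load looks roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

public class BulkLoadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Directory must follow <dir>/<columnFamily>/<hfile>; in the log this is .../output/cf/test_file.
    Path hfileDir = new Path(args[0]);
    TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
    // Hands each HFile to the region server hosting the matching region; the server-side
    // effect is the HStore "Validating hfile ... / Loaded HFile ..." entries further down.
    BulkLoadHFiles.create(conf).bulkLoad(table, hfileDir);
  }
}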
2024-11-23T17:24:16,345 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-23T17:24:16,347 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncConnectionImpl(321): The fetched master address is 215bb12e999f,39621,1732382619205 2024-11-23T17:24:16,347 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3b69d27e 2024-11-23T17:24:16,347 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-23T17:24:16,357 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44070, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T17:24:16,370 WARN [IPC Server handler 2 on default port 37605 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-23T17:24:16,378 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:24:16,396 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:24:16,420 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:37605/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-11-23T17:24:16,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:24:16,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:24:16,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:16,465 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44311, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-23T17:24:16,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-23T17:24:16,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:44311 deadline: 1732382716465, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-11-23T17:24:16,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T17:24:16,475 WARN [IPC Server handler 2 on default port 37605 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-23T17:24:16,507 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:37605/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/output/cf/test_file for inclusion in 4482cba3a53c5628a185540ec8894b00/cf 2024-11-23T17:24:16,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-11-23T17:24:16,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-23T17:24:16,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:37605/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-11-23T17:24:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(2603): Flush status journal for 4482cba3a53c5628a185540ec8894b00: 2024-11-23T17:24:16,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:37605/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/output/cf/test_file to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/staging/jenkins__testExportFileSystemStateWithSplitRegion__5u5ohrfaub66lruj5s6qtvtk5eam37jvafki0vdi61mqc83hhhii5jf0s7mpapoa/cf/test_file 2024-11-23T17:24:16,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/staging/jenkins__testExportFileSystemStateWithSplitRegion__5u5ohrfaub66lruj5s6qtvtk5eam37jvafki0vdi61mqc83hhhii5jf0s7mpapoa/cf/test_file as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_ 2024-11-23T17:24:16,529 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/staging/jenkins__testExportFileSystemStateWithSplitRegion__5u5ohrfaub66lruj5s6qtvtk5eam37jvafki0vdi61mqc83hhhii5jf0s7mpapoa/cf/test_file into 4482cba3a53c5628a185540ec8894b00/cf as 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_ - updating store file list. 2024-11-23T17:24:16,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HStoreFile(483): HFile Bloom filter type for e83dd5a14691421c937355d2fda698e3_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-23T17:24:16,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_ into 4482cba3a53c5628a185540ec8894b00/cf 2024-11-23T17:24:16,561 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/staging/jenkins__testExportFileSystemStateWithSplitRegion__5u5ohrfaub66lruj5s6qtvtk5eam37jvafki0vdi61mqc83hhhii5jf0s7mpapoa/cf/test_file into 4482cba3a53c5628a185540ec8894b00/cf (new location: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_) 2024-11-23T17:24:16,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/staging/jenkins__testExportFileSystemStateWithSplitRegion__5u5ohrfaub66lruj5s6qtvtk5eam37jvafki0vdi61mqc83hhhii5jf0s7mpapoa/cf/test_file 2024-11-23T17:24:16,572 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
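The UnknownProtocolException and the "unable to add token" WARN above come from the secure bulk-load path asking hbase:meta for an HBase delegation token. With SIMPLE authentication in this mini-cluster (and the NameNode reporting "trying to get DT with no secret manager running"), no TokenProvider coprocessor is registered, so the ExecService call fails and the load simply proceeds without a token. A hedged sketch of the configuration under which that call would succeed on a secured cluster; the keys and class names are the standard ones, but treat the exact values as illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureBulkLoadConfSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // With SIMPLE auth (as in this log) delegation tokens are unnecessary and the
    // failed AuthenticationService call is benign. Under Kerberos the token
    // coprocessor is expected on every region server:
    conf.set("hbase.security.authentication", "kerberos");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.token.TokenProvider,"
            + "org.apache.hadoop.hbase.security.access.AccessController");
    System.out.println(conf.get("hbase.coprocessor.region.classes"));
  }
}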
2024-11-23T17:24:16,572 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:24:16,572 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:16,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:16,573 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
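The call stack above is AsyncConnectionImpl.close() recording who closed it: BulkLoadHFilesTool.bulkLoad() opened its own async connection and closed it when the load finished, which is why the locator entries that follow see ConnectionClosedException and drop the cached region location. The equivalent client pattern, sketched with try-with-resources (assumed for illustration, not lifted from the test source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectionLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // createAsyncConnection returns a CompletableFuture; get() blocks until the
    // cluster-id / registry handshake seen earlier in the log has completed.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      // ... perform work (for example, the bulk load) ...
    } // close() emits the "Connection has been closed" INFO and stops the rpc client
  }
}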
2024-11-23T17:24:16,573 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:24:16,574 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00., hostname=215bb12e999f,44667,1732382620918, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00., hostname=215bb12e999f,44667,1732382620918, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=215bb12e999f:44667 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-23T17:24:16,575 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-11-23T17:24:16,575 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00., hostname=215bb12e999f,44667,1732382620918, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-23T17:24:16,575 DEBUG [RPCClient-NioEventLoopGroup-6-12 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00., hostname=215bb12e999f,44667,1732382620918, seqNum=2 from cache 2024-11-23T17:24:16,580 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:24:16,589 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 
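The HMaster$3(2313) entry above records the client asking for a split of the single region. Judging by the row='5' location lookup just before it and the daughter boundaries written to hbase:meta later, the request carried an explicit split point of "5"; that split point is an inference, not something the log states directly. A minimal sketch of issuing the same request through the public Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to start a SplitTableRegionProcedure (pid=21 in this log);
      // the split point becomes the start key of daughter B.
      admin.split(TableName.valueOf("testExportFileSystemStateWithSplitRegion"),
          Bytes.toBytes("5"));
    }
  }
}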
2024-11-23T17:24:16,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=215bb12e999f,44667,1732382620918 2024-11-23T17:24:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=4482cba3a53c5628a185540ec8894b00, daughterA=aa2963034c72bba5e0346badbcc8f10a, daughterB=bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:16,603 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=4482cba3a53c5628a185540ec8894b00, daughterA=aa2963034c72bba5e0346badbcc8f10a, daughterB=bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:16,603 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=4482cba3a53c5628a185540ec8894b00, daughterA=aa2963034c72bba5e0346badbcc8f10a, daughterB=bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:16,603 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=4482cba3a53c5628a185540ec8894b00, daughterA=aa2963034c72bba5e0346badbcc8f10a, daughterB=bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:16,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:24:16,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=4482cba3a53c5628a185540ec8894b00, UNASSIGN}] 2024-11-23T17:24:16,617 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=4482cba3a53c5628a185540ec8894b00, UNASSIGN 2024-11-23T17:24:16,625 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=4482cba3a53c5628a185540ec8894b00, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:24:16,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=4482cba3a53c5628a185540ec8894b00, UNASSIGN because future has completed 2024-11-23T17:24:16,630 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T17:24:16,631 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4482cba3a53c5628a185540ec8894b00, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:24:16,687 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=215bb12e999f:44667 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 34 more 2024-11-23T17:24:16,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:24:17,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:24:17,257 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:17,258 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T17:24:17,259 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 4482cba3a53c5628a185540ec8894b00, disabling compactions & flushes 2024-11-23T17:24:17,259 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 2024-11-23T17:24:17,259 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 2024-11-23T17:24:17,259 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. after waiting 0 ms 2024-11-23T17:24:17,259 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 
2024-11-23T17:24:17,401 INFO [AsyncFSWAL-0-hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf-prefix:215bb12e999f,44667,1732382620918 {}] wal.AbstractFSWAL(1368): Slow sync cost: 137 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44591,DS-fb8b94d4-e3db-4dcc-bda0-734fb35ddb07,DISK], DatanodeInfoWithStorage[127.0.0.1:45743,DS-b2901eba-e22f-4426-936a-68cae471a098,DISK], DatanodeInfoWithStorage[127.0.0.1:46761,DS-7a832cf7-34ef-46bd-be01-46509a8fec53,DISK]] 2024-11-23T17:24:17,484 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-11-23T17:24:17,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:24:17,681 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:24:17,682 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00. 2024-11-23T17:24:17,682 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 4482cba3a53c5628a185540ec8894b00: Waiting for close lock at 1732382657259Running coprocessor pre-close hooks at 1732382657259Disabling compacts and flushes for region at 1732382657259Disabling writes for close at 1732382657259Writing region close event to WAL at 1732382657263 (+4 ms)Running coprocessor post-close hooks at 1732382657556 (+293 ms)Closed at 1732382657681 (+125 ms) 2024-11-23T17:24:17,818 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:17,820 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=4482cba3a53c5628a185540ec8894b00, regionState=CLOSED 2024-11-23T17:24:17,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4482cba3a53c5628a185540ec8894b00, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:24:17,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-11-23T17:24:17,832 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 4482cba3a53c5628a185540ec8894b00, server=215bb12e999f,44667,1732382620918 in 1.1950 sec 2024-11-23T17:24:17,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-11-23T17:24:17,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=4482cba3a53c5628a185540ec8894b00, UNASSIGN in 1.2190 sec 2024-11-23T17:24:17,858 
DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:24:17,864 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=4482cba3a53c5628a185540ec8894b00, threads=1 2024-11-23T17:24:17,891 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_ for region: 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:17,903 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for e83dd5a14691421c937355d2fda698e3_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-23T17:24:17,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741860_1036 (size=21) 2024-11-23T17:24:17,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741860_1036 (size=21) 2024-11-23T17:24:17,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741860_1036 (size=21) 2024-11-23T17:24:17,944 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for e83dd5a14691421c937355d2fda698e3_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-23T17:24:17,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741861_1037 (size=21) 2024-11-23T17:24:17,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741861_1037 (size=21) 2024-11-23T17:24:17,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741861_1037 (size=21) 2024-11-23T17:24:17,982 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_ for region: 4482cba3a53c5628a185540ec8894b00 2024-11-23T17:24:17,984 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 4482cba3a53c5628a185540ec8894b00 Daughter A: [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00] storefiles, Daughter B: [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00] storefiles. 
2024-11-23T17:24:18,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741862_1038 (size=76) 2024-11-23T17:24:18,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741862_1038 (size=76) 2024-11-23T17:24:18,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741862_1038 (size=76) 2024-11-23T17:24:18,014 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:24:18,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741863_1039 (size=76) 2024-11-23T17:24:18,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741863_1039 (size=76) 2024-11-23T17:24:18,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741863_1039 (size=76) 2024-11-23T17:24:18,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:24:18,078 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:24:18,106 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-23T17:24:18,117 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-23T17:24:18,128 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732382658126"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1732382658126"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1732382658126"}]},"ts":"1732382658126"} 2024-11-23T17:24:18,128 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732382658126"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382658126"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732382658126"}]},"ts":"1732382658126"} 2024-11-23T17:24:18,128 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732382658126"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382658126"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732382658126"}]},"ts":"1732382658126"} 2024-11-23T17:24:18,150 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=aa2963034c72bba5e0346badbcc8f10a, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bfb7e4cc6401cf4f4a4036146f100da7, ASSIGN}] 2024-11-23T17:24:18,153 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=aa2963034c72bba5e0346badbcc8f10a, ASSIGN 2024-11-23T17:24:18,153 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bfb7e4cc6401cf4f4a4036146f100da7, ASSIGN 2024-11-23T17:24:18,155 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bfb7e4cc6401cf4f4a4036146f100da7, ASSIGN; state=SPLITTING_NEW, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:24:18,155 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=aa2963034c72bba5e0346badbcc8f10a, ASSIGN; state=SPLITTING_NEW, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:24:18,305 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:24:18,306 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=bfb7e4cc6401cf4f4a4036146f100da7, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:24:18,307 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=aa2963034c72bba5e0346badbcc8f10a, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:24:18,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bfb7e4cc6401cf4f4a4036146f100da7, ASSIGN because future has completed 2024-11-23T17:24:18,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:24:18,318 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=aa2963034c72bba5e0346badbcc8f10a, ASSIGN because future has completed 2024-11-23T17:24:18,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa2963034c72bba5e0346badbcc8f10a, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:24:18,480 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. 2024-11-23T17:24:18,480 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => aa2963034c72bba5e0346badbcc8f10a, NAME => 'testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a.', STARTKEY => '', ENDKEY => '5'} 2024-11-23T17:24:18,481 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. service=AccessControlService 2024-11-23T17:24:18,481 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:24:18,482 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,482 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:24:18,482 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,482 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,485 INFO [StoreOpener-aa2963034c72bba5e0346badbcc8f10a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,488 INFO [StoreOpener-aa2963034c72bba5e0346badbcc8f10a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa2963034c72bba5e0346badbcc8f10a columnFamilyName cf 2024-11-23T17:24:18,488 DEBUG [StoreOpener-aa2963034c72bba5e0346badbcc8f10a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:24:18,539 DEBUG [StoreFileOpener-aa2963034c72bba5e0346badbcc8f10a-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00: NONE, but ROW specified in column family configuration 2024-11-23T17:24:18,568 DEBUG [StoreOpener-aa2963034c72bba5e0346badbcc8f10a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00->hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_-bottom 2024-11-23T17:24:18,569 INFO [StoreOpener-aa2963034c72bba5e0346badbcc8f10a-1 {}] regionserver.HStore(327): Store=aa2963034c72bba5e0346badbcc8f10a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:24:18,570 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,571 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,574 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,575 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,575 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,580 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,581 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened aa2963034c72bba5e0346badbcc8f10a; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69853932, jitterRate=0.040904700756073}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:24:18,581 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:18,582 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for aa2963034c72bba5e0346badbcc8f10a: Running coprocessor pre-open hook at 1732382658482Writing region info on filesystem at 1732382658482Initializing all the Stores at 1732382658484 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382658484Cleaning up temporary data from old regions at 1732382658575 (+91 ms)Running coprocessor post-open hooks at 1732382658581 (+6 ms)Region opened successfully at 1732382658582 (+1 ms) 2024-11-23T17:24:18,584 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a., pid=27, masterSystemTime=1732382658474 2024-11-23T17:24:18,585 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a.,because compaction is disabled. 2024-11-23T17:24:18,588 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. 2024-11-23T17:24:18,588 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. 2024-11-23T17:24:18,589 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. 2024-11-23T17:24:18,589 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => bfb7e4cc6401cf4f4a4036146f100da7, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7.', STARTKEY => '5', ENDKEY => ''} 2024-11-23T17:24:18,589 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=aa2963034c72bba5e0346badbcc8f10a, regionState=OPEN, openSeqNum=7, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:24:18,589 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. service=AccessControlService 2024-11-23T17:24:18,590 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
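With daughter A now open and daughter B opening in the entries that follow, the split remains transparent to readers: since compaction is disabled in this test, the daughters keep serving data through the reference files loaded above rather than rewritten HFiles. A minimal, hypothetical scan over the split table (not code shown anywhere in this log) would look like:

    // Illustrative only: scanning the table after the split; table name taken from this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanAfterSplitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
             ResultScanner scanner = table.getScanner(new Scan())) {
          for (Result row : scanner) {
            // Rows sorting below "5" are served by daughter A, the rest by daughter B.
            System.out.println(Bytes.toString(row.getRow()));
          }
        }
      }
    }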
2024-11-23T17:24:18,590 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,590 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:24:18,590 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,590 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,595 INFO [StoreOpener-bfb7e4cc6401cf4f4a4036146f100da7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,597 INFO [StoreOpener-bfb7e4cc6401cf4f4a4036146f100da7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bfb7e4cc6401cf4f4a4036146f100da7 columnFamilyName cf 2024-11-23T17:24:18,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa2963034c72bba5e0346badbcc8f10a, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:24:18,597 DEBUG [StoreOpener-bfb7e4cc6401cf4f4a4036146f100da7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:24:18,603 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=24 2024-11-23T17:24:18,603 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure aa2963034c72bba5e0346badbcc8f10a, server=215bb12e999f,44667,1732382620918 in 278 msec 2024-11-23T17:24:18,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=aa2963034c72bba5e0346badbcc8f10a, ASSIGN in 453 msec 2024-11-23T17:24:18,613 DEBUG [StoreFileOpener-bfb7e4cc6401cf4f4a4036146f100da7-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00: NONE, but ROW specified in column family configuration 2024-11-23T17:24:18,617 DEBUG [StoreOpener-bfb7e4cc6401cf4f4a4036146f100da7-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00->hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_-top 2024-11-23T17:24:18,618 INFO [StoreOpener-bfb7e4cc6401cf4f4a4036146f100da7-1 {}] regionserver.HStore(327): Store=bfb7e4cc6401cf4f4a4036146f100da7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:24:18,618 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,619 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,621 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,621 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,621 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,624 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,626 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened bfb7e4cc6401cf4f4a4036146f100da7; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68695413, jitterRate=0.023641422390937805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:24:18,626 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:18,626 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for bfb7e4cc6401cf4f4a4036146f100da7: Running coprocessor pre-open hook at 1732382658590Writing region info on filesystem at 1732382658590Initializing all the Stores at 1732382658593 (+3 ms)Instantiating store for 
column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382658593Cleaning up temporary data from old regions at 1732382658621 (+28 ms)Running coprocessor post-open hooks at 1732382658626 (+5 ms)Region opened successfully at 1732382658626 2024-11-23T17:24:18,632 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7., pid=26, masterSystemTime=1732382658474 2024-11-23T17:24:18,633 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7.,because compaction is disabled. 2024-11-23T17:24:18,648 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. 2024-11-23T17:24:18,648 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. 2024-11-23T17:24:18,648 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=bfb7e4cc6401cf4f4a4036146f100da7, regionState=OPEN, openSeqNum=7, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:24:18,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:24:18,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-11-23T17:24:18,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7, server=215bb12e999f,44667,1732382620918 in 336 msec 2024-11-23T17:24:18,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-11-23T17:24:18,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bfb7e4cc6401cf4f4a4036146f100da7, ASSIGN in 510 msec 2024-11-23T17:24:18,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=4482cba3a53c5628a185540ec8894b00, daughterA=aa2963034c72bba5e0346badbcc8f10a, daughterB=bfb7e4cc6401cf4f4a4036146f100da7 in 2.0680 sec 2024-11-23T17:24:19,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-23T17:24:19,093 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:24:19,093 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-23T17:24:19,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:24:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382659100 (current time:1732382659100). 2024-11-23T17:24:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:24:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-23T17:24:19,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:24:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7de82d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:24:19,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:24:19,103 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:24:19,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:24:19,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:24:19,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d0180e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:19,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:24:19,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:24:19,105 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:19,106 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44092, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:24:19,107 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c72927, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:24:19,109 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:24:19,110 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:19,111 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54458, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:24:19,113 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 2024-11-23T17:24:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:24:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:19,116 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
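The snapshot request logged at 17:24:19,100 ({ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }) is what the master is validating in the entries above and below (security check, ACL lookup against hbase:acl, then SnapshotProcedure pid=28). On the client side such a request is typically a single Admin call; the following is a hedged sketch reusing the names from this log, not the test's literal code.

    // Hypothetical client call behind the snapshot request above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side snapshot (SnapshotProcedure pid=28 in this log) completes.
          admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
        }
      }
    }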
2024-11-23T17:24:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4437599c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:19,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:24:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:24:19,118 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:24:19,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:24:19,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:24:19,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b2fe172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:19,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:24:19,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:24:19,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:19,122 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44118, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:24:19,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd0166b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:24:19,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:24:19,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:24:19,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:19,128 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54474, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:24:19,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:24:19,132 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:24:19,134 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57502, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:24:19,136 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 2024-11-23T17:24:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:24:19,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:24:19,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-23T17:24:19,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:24:19,142 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:24:19,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-23T17:24:19,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-23T17:24:19,148 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:24:19,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T17:24:19,151 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:24:19,161 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:24:19,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741864_1040 (size=197) 2024-11-23T17:24:19,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741864_1040 (size=197) 2024-11-23T17:24:19,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741864_1040 (size=197) 2024-11-23T17:24:19,231 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:24:19,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aa2963034c72bba5e0346badbcc8f10a}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7}] 2024-11-23T17:24:19,237 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:19,238 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:19,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T17:24:19,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-11-23T17:24:19,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-11-23T17:24:19,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for bfb7e4cc6401cf4f4a4036146f100da7: 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for aa2963034c72bba5e0346badbcc8f10a: 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00->hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_-bottom] hfiles 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00->hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_-top] hfiles 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00 for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741866_1042 
(size=182) 2024-11-23T17:24:19,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741866_1042 (size=182) 2024-11-23T17:24:19,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741866_1042 (size=182) 2024-11-23T17:24:19,416 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. 2024-11-23T17:24:19,416 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-23T17:24:19,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-11-23T17:24:19,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:19,417 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:24:19,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure aa2963034c72bba5e0346badbcc8f10a in 187 msec 2024-11-23T17:24:19,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741865_1041 (size=182) 2024-11-23T17:24:19,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741865_1041 (size=182) 2024-11-23T17:24:19,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741865_1041 (size=182) 2024-11-23T17:24:19,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. 
2024-11-23T17:24:19,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-11-23T17:24:19,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-11-23T17:24:19,429 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:19,429 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:24:19,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=30, resume processing ppid=28 2024-11-23T17:24:19,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7 in 199 msec 2024-11-23T17:24:19,436 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:24:19,439 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-11-23T17:24:19,439 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-23T17:24:19,439 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:24:19,441 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_] hfiles 2024-11-23T17:24:19,441 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_ 2024-11-23T17:24:19,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741867_1043 (size=129) 2024-11-23T17:24:19,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741867_1043 (size=129) 2024-11-23T17:24:19,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741867_1043 (size=129) 2024-11-23T17:24:19,457 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 4482cba3a53c5628a185540ec8894b00, NAME => 'testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00.', STARTKEY => '', ENDKEY => 
'', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,463 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:24:19,465 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:24:19,465 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T17:24:19,467 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741868_1044 (size=891) 2024-11-23T17:24:19,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741868_1044 (size=891) 2024-11-23T17:24:19,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741868_1044 (size=891) 2024-11-23T17:24:19,516 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:24:19,536 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:24:19,536 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,540 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute 
state=SNAPSHOT_POST_OPERATION 2024-11-23T17:24:19,541 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-23T17:24:19,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 403 msec 2024-11-23T17:24:19,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-23T17:24:19,776 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:24:19,777 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777 2024-11-23T17:24:19,777 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37605, tgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777, rawTgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:24:19,833 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:24:19,833 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,839 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
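[Editorial note, not part of the recorded run] The entries above show the SnapshotProcedure (pid=28) finishing and the test handing the completed snapshot to the ExportSnapshot tool (inputFs/inputRoot, outputRoot, and the initial .hbase-snapshot/.tmp destination, followed by expiration/integrity verification). For orientation, here is a minimal sketch of how that tool is normally driven from Java. The snapshot name is taken from the log; the ToolRunner wiring, destination URI, and mapper count are illustrative assumptions, not the exact code TestExportSnapshot executes.

```java
// Illustrative only: launches the MapReduce-backed snapshot copy the log records
// (manifest first, then the referenced hfiles, into <target>/.hbase-snapshot/.tmp).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
        "-copy-to", "hdfs://backup-cluster:8020/hbase",  // placeholder destination URI
        "-mappers", "1"                                  // illustrative mapper count
    });
    System.exit(exitCode);
  }
}
```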
2024-11-23T17:24:19,848 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:24:19,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741870_1046 (size=197) 2024-11-23T17:24:19,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741870_1046 (size=197) 2024-11-23T17:24:19,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741870_1046 (size=197) 2024-11-23T17:24:19,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741869_1045 (size=891) 2024-11-23T17:24:19,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741869_1045 (size=891) 2024-11-23T17:24:19,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741869_1045 (size=891) 2024-11-23T17:24:20,317 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:20,317 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:20,317 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:21,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-9138925338812258904.jar 2024-11-23T17:24:21,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:21,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:21,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-982083001681417215.jar 
2024-11-23T17:24:21,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:21,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:21,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:21,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:21,976 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:21,977 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:24:21,977 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:24:21,978 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:24:21,978 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:24:21,978 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:24:21,979 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:24:21,979 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:24:21,980 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:24:21,980 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:24:21,981 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:24:21,981 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:24:21,981 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:24:21,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:24:21,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:24:21,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:24:21,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:24:21,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:24:21,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:24:21,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:24:23,185 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:24:23,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741871_1047 (size=131440) 2024-11-23T17:24:23,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741871_1047 (size=131440) 2024-11-23T17:24:23,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741871_1047 (size=131440) 2024-11-23T17:24:23,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741872_1048 (size=4188619) 2024-11-23T17:24:23,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741872_1048 (size=4188619) 2024-11-23T17:24:23,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741872_1048 (size=4188619) 2024-11-23T17:24:23,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741873_1049 (size=1323991) 2024-11-23T17:24:23,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741873_1049 (size=1323991) 2024-11-23T17:24:23,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741873_1049 (size=1323991) 2024-11-23T17:24:23,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741874_1050 (size=903733) 2024-11-23T17:24:23,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741874_1050 (size=903733) 2024-11-23T17:24:23,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741874_1050 (size=903733) 2024-11-23T17:24:24,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741875_1051 (size=8360083) 2024-11-23T17:24:24,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741875_1051 (size=8360083) 2024-11-23T17:24:24,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741875_1051 (size=8360083) 2024-11-23T17:24:24,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741876_1052 (size=1877034) 2024-11-23T17:24:24,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741876_1052 (size=1877034) 2024-11-23T17:24:24,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741876_1052 (size=1877034) 2024-11-23T17:24:24,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741877_1053 (size=77835) 2024-11-23T17:24:24,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741877_1053 (size=77835) 2024-11-23T17:24:24,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741877_1053 (size=77835) 2024-11-23T17:24:24,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741878_1054 (size=30949) 2024-11-23T17:24:24,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741878_1054 (size=30949) 2024-11-23T17:24:24,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741878_1054 (size=30949) 2024-11-23T17:24:25,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741879_1055 (size=1597347) 2024-11-23T17:24:25,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741879_1055 (size=1597347) 2024-11-23T17:24:25,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741879_1055 (size=1597347) 2024-11-23T17:24:26,769 INFO [master/215bb12e999f:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-23T17:24:26,769 INFO [master/215bb12e999f:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
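[Editorial note, not part of the recorded run] The earlier TableMapReduceUtil(972) "For class X, using jar Y" lines record dependency-jar resolution for the export job: for each class the job needs, the containing jar is located and staged for submission; the surrounding addStoredBlock entries are consistent with those jars being written into the test DFS. A minimal sketch of the call that produces that output, assuming a plain Job built against the cluster configuration (the job name is a placeholder):

```java
// Illustrative sketch: trigger HBase's dependency-jar resolution for a MapReduce job.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-example");  // placeholder job name
    // Resolves the containing jar for each required HBase/Hadoop class and stages it
    // with the job; the resolved jar paths are what the DEBUG lines enumerate.
    TableMapReduceUtil.addDependencyJars(job);
    System.out.println(job.getConfiguration().get("tmpjars"));
  }
}
```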
2024-11-23T17:24:27,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741880_1056 (size=4695811) 2024-11-23T17:24:27,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741880_1056 (size=4695811) 2024-11-23T17:24:27,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741880_1056 (size=4695811) 2024-11-23T17:24:27,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741881_1057 (size=232957) 2024-11-23T17:24:27,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741881_1057 (size=232957) 2024-11-23T17:24:27,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741881_1057 (size=232957) 2024-11-23T17:24:27,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741882_1058 (size=440957) 2024-11-23T17:24:27,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741882_1058 (size=440957) 2024-11-23T17:24:27,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741882_1058 (size=440957) 2024-11-23T17:24:27,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741883_1059 (size=127628) 2024-11-23T17:24:27,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741883_1059 (size=127628) 2024-11-23T17:24:27,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741883_1059 (size=127628) 2024-11-23T17:24:27,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741884_1060 (size=20406) 2024-11-23T17:24:27,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741884_1060 (size=20406) 2024-11-23T17:24:27,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741884_1060 (size=20406) 2024-11-23T17:24:35,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741885_1061 (size=5175431) 2024-11-23T17:24:35,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741885_1061 (size=5175431) 2024-11-23T17:24:35,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741885_1061 (size=5175431) 2024-11-23T17:24:35,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741886_1062 (size=217634) 2024-11-23T17:24:35,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741886_1062 
(size=217634) 2024-11-23T17:24:35,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741886_1062 (size=217634) 2024-11-23T17:24:35,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741887_1063 (size=6424742) 2024-11-23T17:24:35,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741887_1063 (size=6424742) 2024-11-23T17:24:35,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741887_1063 (size=6424742) 2024-11-23T17:24:35,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741888_1064 (size=1832290) 2024-11-23T17:24:35,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741888_1064 (size=1832290) 2024-11-23T17:24:35,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741888_1064 (size=1832290) 2024-11-23T17:24:36,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741889_1065 (size=322274) 2024-11-23T17:24:36,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741889_1065 (size=322274) 2024-11-23T17:24:36,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741889_1065 (size=322274) 2024-11-23T17:24:36,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741890_1066 (size=503880) 2024-11-23T17:24:36,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741890_1066 (size=503880) 2024-11-23T17:24:36,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741890_1066 (size=503880) 2024-11-23T17:24:36,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741891_1067 (size=29229) 2024-11-23T17:24:36,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741891_1067 (size=29229) 2024-11-23T17:24:36,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741891_1067 (size=29229) 2024-11-23T17:24:36,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741892_1068 (size=24096) 2024-11-23T17:24:36,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741892_1068 (size=24096) 2024-11-23T17:24:36,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741892_1068 (size=24096) 2024-11-23T17:24:36,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to 
blk_1073741893_1069 (size=111872) 2024-11-23T17:24:36,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741893_1069 (size=111872) 2024-11-23T17:24:36,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741893_1069 (size=111872) 2024-11-23T17:24:36,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741894_1070 (size=45609) 2024-11-23T17:24:36,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741894_1070 (size=45609) 2024-11-23T17:24:36,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741894_1070 (size=45609) 2024-11-23T17:24:36,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741895_1071 (size=136454) 2024-11-23T17:24:36,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741895_1071 (size=136454) 2024-11-23T17:24:36,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741895_1071 (size=136454) 2024-11-23T17:24:37,362 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:24:37,367 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-11-23T17:24:37,380 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=4482cba3a53c5628a185540ec8894b00-e83dd5a14691421c937355d2fda698e3_SeqId_4_. 2024-11-23T17:24:37,380 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=4482cba3a53c5628a185540ec8894b00-e83dd5a14691421c937355d2fda698e3_SeqId_4_. 
2024-11-23T17:24:37,381 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-11-23T17:24:37,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741896_1072 (size=244) 2024-11-23T17:24:37,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741896_1072 (size=244) 2024-11-23T17:24:37,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741896_1072 (size=244) 2024-11-23T17:24:37,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741897_1073 (size=17) 2024-11-23T17:24:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741897_1073 (size=17) 2024-11-23T17:24:37,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741897_1073 (size=17) 2024-11-23T17:24:37,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741898_1074 (size=304052) 2024-11-23T17:24:37,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741898_1074 (size=304052) 2024-11-23T17:24:37,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741898_1074 (size=304052) 2024-11-23T17:24:38,086 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:24:39,728 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:24:39,728 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:24:40,054 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0001_000001 (auth:SIMPLE) from 127.0.0.1:38486 2024-11-23T17:24:43,793 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3aa5942783f70257c637cc1abea423ca, had cached 0 bytes from a total of 8256 2024-11-23T17:24:43,844 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 28218a36ba9ccc9cb78380286e1f08cb, had cached 0 bytes from a total of 5354 2024-11-23T17:24:46,112 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:55808 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2660ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/, blockId=1073741830, seqno=266 2024-11-23T17:24:46,115 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:38074 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2663ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/, blockId=1073741830, seqno=266 2024-11-23T17:24:46,112 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:53028 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741830_1006] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2660ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/, blockId=1073741830, seqno=266 2024-11-23T17:24:46,119 INFO [AsyncFSWAL-0-hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData-prefix:215bb12e999f,39621,1732382619205 {}] wal.AbstractFSWAL(1368): Slow sync cost: 2667 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:44591,DS-fb8b94d4-e3db-4dcc-bda0-734fb35ddb07,DISK], DatanodeInfoWithStorage[127.0.0.1:45743,DS-b2901eba-e22f-4426-936a-68cae471a098,DISK], DatanodeInfoWithStorage[127.0.0.1:46761,DS-7a832cf7-34ef-46bd-be01-46509a8fec53,DISK]] 2024-11-23T17:24:47,276 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 41264850465b123187f03a78b2521b39 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:24:47,284 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3aa5942783f70257c637cc1abea423ca changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:24:47,285 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 28218a36ba9ccc9cb78380286e1f08cb changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:24:54,101 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0001_000001 (auth:SIMPLE) from 127.0.0.1:39504 
2024-11-23T17:24:54,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741899_1075 (size=349750) 2024-11-23T17:24:54,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741899_1075 (size=349750) 2024-11-23T17:24:54,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741899_1075 (size=349750) 2024-11-23T17:24:56,509 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0001_000001 (auth:SIMPLE) from 127.0.0.1:59088 2024-11-23T17:25:03,482 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region aa2963034c72bba5e0346badbcc8f10a, had cached 0 bytes from a total of 320414712 2024-11-23T17:25:03,590 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bfb7e4cc6401cf4f4a4036146f100da7, had cached 0 bytes from a total of 320414712 2024-11-23T17:25:08,088 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:25:11,026 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:55016 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 506ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/, blockId=1073741900, seqno=374 2024-11-23T17:25:11,035 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:49880 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 515ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/, blockId=1073741900, seqno=374 2024-11-23T17:25:11,041 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:53368 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 520ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/, blockId=1073741900, seqno=374 2024-11-23T17:25:28,659 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:49880 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2715ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/, 
blockId=1073741900, seqno=1241 2024-11-23T17:25:28,659 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:53368 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2715ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/, blockId=1073741900, seqno=1241 2024-11-23T17:25:28,659 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:55016 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741900_1076] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 2715ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/, blockId=1073741900, seqno=1241 2024-11-23T17:25:28,794 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3aa5942783f70257c637cc1abea423ca, had cached 0 bytes from a total of 8256 2024-11-23T17:25:28,844 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 28218a36ba9ccc9cb78380286e1f08cb, had cached 0 bytes from a total of 5354 2024-11-23T17:25:38,088 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:25:42,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741900_1076 (size=134217728) 2024-11-23T17:25:42,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741900_1076 (size=134217728) 2024-11-23T17:25:42,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741900_1076 (size=134217728) 2024-11-23T17:25:48,484 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region aa2963034c72bba5e0346badbcc8f10a, had cached 0 bytes from a total of 320414712 2024-11-23T17:25:48,590 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bfb7e4cc6401cf4f4a4036146f100da7, had cached 0 bytes from a total of 320414712 2024-11-23T17:26:08,089 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:26:13,796 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3aa5942783f70257c637cc1abea423ca, had cached 0 bytes from a total of 8256 2024-11-23T17:26:13,845 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 28218a36ba9ccc9cb78380286e1f08cb, had cached 0 bytes from a total of 5354 2024-11-23T17:26:19,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741901_1077 (size=134217728) 2024-11-23T17:26:19,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741901_1077 (size=134217728) 2024-11-23T17:26:19,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741901_1077 (size=134217728) 2024-11-23T17:26:29,354 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:41404 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741902_1078] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1586ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/, blockId=1073741902, seqno=4622 2024-11-23T17:26:29,354 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:47924 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741902_1078] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1586ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/, blockId=1073741902, seqno=4622 2024-11-23T17:26:29,379 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:55838 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741902_1078] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1612ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/, blockId=1073741902, seqno=4622 2024-11-23T17:26:31,440 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:41404 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741902_1078] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1418ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/, blockId=1073741902, seqno=4747 2024-11-23T17:26:31,440 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:47924 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741902_1078] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1418ms (threshold=300ms), 
volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/, blockId=1073741902, seqno=4747 2024-11-23T17:26:31,440 WARN [DataXceiver for client DFSClient_attempt_1732382632246_0001_m_000000_0_-1663840070_1 at /127.0.0.1:55838 [Receiving block BP-931987288-172.17.0.2-1732382611165:blk_1073741902_1078] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 1418ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/, blockId=1073741902, seqno=4747 2024-11-23T17:26:33,485 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region aa2963034c72bba5e0346badbcc8f10a, had cached 0 bytes from a total of 320414712 2024-11-23T17:26:33,591 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region bfb7e4cc6401cf4f4a4036146f100da7, had cached 0 bytes from a total of 320414712 2024-11-23T17:26:33,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741902_1078 (size=51979256) 2024-11-23T17:26:33,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741902_1078 (size=51979256) 2024-11-23T17:26:33,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741902_1078 (size=51979256) 2024-11-23T17:26:34,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741903_1079 (size=17524) 2024-11-23T17:26:34,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741903_1079 (size=17524) 2024-11-23T17:26:34,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741903_1079 (size=17524) 2024-11-23T17:26:34,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741904_1080 (size=482) 2024-11-23T17:26:34,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741904_1080 (size=482) 2024-11-23T17:26:34,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741904_1080 (size=482) 2024-11-23T17:26:34,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741905_1081 (size=17524) 2024-11-23T17:26:34,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741905_1081 (size=17524) 2024-11-23T17:26:34,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741905_1081 (size=17524) 2024-11-23T17:26:34,182 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0001/container_1732382632246_0001_01_000002/launch_container.sh] 2024-11-23T17:26:34,182 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0001/container_1732382632246_0001_01_000002/container_tokens] 2024-11-23T17:26:34,182 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0001/container_1732382632246_0001_01_000002/sysfs] 2024-11-23T17:26:34,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741906_1082 (size=349750) 2024-11-23T17:26:34,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741906_1082 (size=349750) 2024-11-23T17:26:34,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741906_1082 (size=349750) 2024-11-23T17:26:34,202 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0001_000001 (auth:SIMPLE) from 127.0.0.1:47860 2024-11-23T17:26:35,886 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:26:35,889 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-11-23T17:26:35,898 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:35,898 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:26:35,899 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:26:35,899 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:35,900 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-23T17:26:35,900 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-23T17:26:35,901 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:35,901 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-23T17:26:35,901 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382659777/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-23T17:26:35,916 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43122, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-23T17:26:35,918 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:35,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:35,926 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382795926"}]},"ts":"1732382795926"} 2024-11-23T17:26:35,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-23T17:26:35,927 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50491, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:26:35,929 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-11-23T17:26:35,929 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-23T17:26:35,931 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-11-23T17:26:35,936 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=aa2963034c72bba5e0346badbcc8f10a, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bfb7e4cc6401cf4f4a4036146f100da7, UNASSIGN}] 2024-11-23T17:26:35,937 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bfb7e4cc6401cf4f4a4036146f100da7, UNASSIGN 2024-11-23T17:26:35,937 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=aa2963034c72bba5e0346badbcc8f10a, UNASSIGN 2024-11-23T17:26:35,939 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=bfb7e4cc6401cf4f4a4036146f100da7, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:26:35,939 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=aa2963034c72bba5e0346badbcc8f10a, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:26:35,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=aa2963034c72bba5e0346badbcc8f10a, UNASSIGN because future has completed 2024-11-23T17:26:35,941 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:26:35,941 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure aa2963034c72bba5e0346badbcc8f10a, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:26:35,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, 
region=bfb7e4cc6401cf4f4a4036146f100da7, UNASSIGN because future has completed 2024-11-23T17:26:35,942 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:26:35,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:26:36,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-23T17:26:36,095 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48591, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:26:36,096 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:26:36,096 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:26:36,097 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing aa2963034c72bba5e0346badbcc8f10a, disabling compactions & flushes 2024-11-23T17:26:36,097 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. 2024-11-23T17:26:36,097 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. 2024-11-23T17:26:36,097 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. after waiting 0 ms 2024-11-23T17:26:36,097 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. 2024-11-23T17:26:36,106 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-23T17:26:36,107 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:26:36,107 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a. 
2024-11-23T17:26:36,108 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for aa2963034c72bba5e0346badbcc8f10a: Waiting for close lock at 1732382796096Running coprocessor pre-close hooks at 1732382796096Disabling compacts and flushes for region at 1732382796096Disabling writes for close at 1732382796097 (+1 ms)Writing region close event to WAL at 1732382796100 (+3 ms)Running coprocessor post-close hooks at 1732382796107 (+7 ms)Closed at 1732382796107 2024-11-23T17:26:36,111 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:26:36,111 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:26:36,112 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:26:36,112 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing bfb7e4cc6401cf4f4a4036146f100da7, disabling compactions & flushes 2024-11-23T17:26:36,112 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. 2024-11-23T17:26:36,112 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. 2024-11-23T17:26:36,112 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. after waiting 0 ms 2024-11-23T17:26:36,112 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. 
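[Editor's note] The disable/unassign sequence above (pids 31-36) is driven by a single client-side admin request: the master logs "Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion", stores a DisableTableProcedure, and fans it out into CloseTableRegionsProcedure, per-region TransitRegionStateProcedure and CloseRegionProcedure children. A minimal, hedged sketch of one way to issue that request from a client (standard HBase Admin API; the class name is illustrative and not taken from the test source):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes a reachable cluster whose hbase-site.xml is on the classpath;
    // the mini-cluster in this log wires its configuration up internally.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
      if (admin.isTableEnabled(table)) {
        // Blocks until the master's DisableTableProcedure finishes, i.e. until every
        // region has been closed and the table is marked DISABLED in hbase:meta.
        admin.disableTable(table);
      }
    }
  }
}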
2024-11-23T17:26:36,112 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=aa2963034c72bba5e0346badbcc8f10a, regionState=CLOSED 2024-11-23T17:26:36,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure aa2963034c72bba5e0346badbcc8f10a, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:26:36,134 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-23T17:26:36,135 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:26:36,135 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7. 2024-11-23T17:26:36,136 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for bfb7e4cc6401cf4f4a4036146f100da7: Waiting for close lock at 1732382796112Running coprocessor pre-close hooks at 1732382796112Disabling compacts and flushes for region at 1732382796112Disabling writes for close at 1732382796112Writing region close event to WAL at 1732382796113 (+1 ms)Running coprocessor post-close hooks at 1732382796135 (+22 ms)Closed at 1732382796135 2024-11-23T17:26:36,139 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:26:36,140 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-11-23T17:26:36,140 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure aa2963034c72bba5e0346badbcc8f10a, server=215bb12e999f,44667,1732382620918 in 193 msec 2024-11-23T17:26:36,140 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=bfb7e4cc6401cf4f4a4036146f100da7, regionState=CLOSED 2024-11-23T17:26:36,144 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=aa2963034c72bba5e0346badbcc8f10a, UNASSIGN in 204 msec 2024-11-23T17:26:36,144 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:26:36,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-11-23T17:26:36,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure bfb7e4cc6401cf4f4a4036146f100da7, 
server=215bb12e999f,44667,1732382620918 in 206 msec 2024-11-23T17:26:36,156 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-11-23T17:26:36,156 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=bfb7e4cc6401cf4f4a4036146f100da7, UNASSIGN in 215 msec 2024-11-23T17:26:36,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-11-23T17:26:36,164 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 228 msec 2024-11-23T17:26:36,167 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382796166"}]},"ts":"1732382796166"} 2024-11-23T17:26:36,169 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-23T17:26:36,170 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-23T17:26:36,174 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 252 msec 2024-11-23T17:26:36,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-23T17:26:36,246 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:26:36,249 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,256 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,258 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,259 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53439, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:26:36,268 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:26:36,268 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00 2024-11-23T17:26:36,268 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:26:36,271 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/recovered.edits] 2024-11-23T17:26:36,271 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/recovered.edits] 2024-11-23T17:26:36,271 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/recovered.edits] 2024-11-23T17:26:36,286 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46247, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-23T17:26:36,287 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,359 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00 2024-11-23T17:26:36,360 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_ to 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_ 2024-11-23T17:26:36,361 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/cf/e83dd5a14691421c937355d2fda698e3_SeqId_4_.4482cba3a53c5628a185540ec8894b00 2024-11-23T17:26:36,369 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/recovered.edits/10.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7/recovered.edits/10.seqid 2024-11-23T17:26:36,370 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/recovered.edits/6.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00/recovered.edits/6.seqid 2024-11-23T17:26:36,370 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/bfb7e4cc6401cf4f4a4036146f100da7 2024-11-23T17:26:36,371 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/4482cba3a53c5628a185540ec8894b00 2024-11-23T17:26:36,371 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/recovered.edits/10.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a/recovered.edits/10.seqid 2024-11-23T17:26:36,372 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportFileSystemStateWithSplitRegion/aa2963034c72bba5e0346badbcc8f10a 2024-11-23T17:26:36,373 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-11-23T17:26:36,376 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,381 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-23T17:26:36,389 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-23T17:26:36,393 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-23T17:26:36,395 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,395 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 2024-11-23T17:26:36,396 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382796396"}]},"ts":"9223372036854775807"} 2024-11-23T17:26:36,396 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382796396"}]},"ts":"9223372036854775807"} 2024-11-23T17:26:36,396 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382796396"}]},"ts":"9223372036854775807"} 2024-11-23T17:26:36,401 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-11-23T17:26:36,401 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 4482cba3a53c5628a185540ec8894b00, NAME => 'testExportFileSystemStateWithSplitRegion,,1732382647351.4482cba3a53c5628a185540ec8894b00.', STARTKEY => '', ENDKEY => ''}, {ENCODED => aa2963034c72bba5e0346badbcc8f10a, NAME => 'testExportFileSystemStateWithSplitRegion,,1732382656596.aa2963034c72bba5e0346badbcc8f10a.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => bfb7e4cc6401cf4f4a4036146f100da7, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732382656596.bfb7e4cc6401cf4f4a4036146f100da7.', STARTKEY => '5', ENDKEY => ''}] 2024-11-23T17:26:36,401 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
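[Editor's note] With the table disabled, the DeleteTableProcedure above (pid=37) archives the region directories (note the moves under .../archive/data/default/... rather than outright deletion), removes the region rows from hbase:meta, and drops the table descriptor. A hedged sketch of the corresponding client-side call, again using the public Admin API rather than the test's own helpers:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class DropTableSketch {
  // Disable-then-delete, mirroring the procedure sequence in this log.
  // HFiles are not destroyed immediately: deleteTable moves them into the
  // archive directory, from which cleaner chores remove them later.
  static void dropTable(Admin admin, String name) throws IOException {
    TableName table = TableName.valueOf(name);
    if (!admin.tableExists(table)) {
      return;
    }
    if (admin.isTableEnabled(table)) {
      admin.disableTable(table);
    }
    admin.deleteTable(table); // requires the table to be disabled first
  }
}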
2024-11-23T17:26:36,402 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382796401"}]},"ts":"9223372036854775807"} 2024-11-23T17:26:36,405 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-11-23T17:26:36,407 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 157 msec 2024-11-23T17:26:36,413 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,413 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:36,434 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:36,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:36,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:36,434 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-11-23T17:26:36,434 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-11-23T17:26:36,434 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testExportFileSystemStateWithSplitRegion with data null 2024-11-23T17:26:36,434 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T17:26:36,434 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T17:26:36,434 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T17:26:36,435 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-11-23T17:26:36,435 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T17:26:36,436 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:26:36,436 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:26:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-23T17:26:36,437 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:26:36,437 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:26:36,438 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,438 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:26:36,439 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-23T17:26:36,448 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382796446"}]},"ts":"1732382796446"} 2024-11-23T17:26:36,452 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-11-23T17:26:36,452 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-23T17:26:36,454 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-11-23T17:26:36,460 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=28218a36ba9ccc9cb78380286e1f08cb, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3aa5942783f70257c637cc1abea423ca, UNASSIGN}] 2024-11-23T17:26:36,462 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=28218a36ba9ccc9cb78380286e1f08cb, UNASSIGN 2024-11-23T17:26:36,462 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3aa5942783f70257c637cc1abea423ca, UNASSIGN 2024-11-23T17:26:36,464 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=3aa5942783f70257c637cc1abea423ca, regionState=CLOSING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:26:36,464 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=28218a36ba9ccc9cb78380286e1f08cb, regionState=CLOSING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:26:36,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=28218a36ba9ccc9cb78380286e1f08cb, UNASSIGN because future has completed 2024-11-23T17:26:36,477 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:26:36,477 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:26:36,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3aa5942783f70257c637cc1abea423ca, UNASSIGN because future has completed 
2024-11-23T17:26:36,481 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:26:36,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3aa5942783f70257c637cc1abea423ca, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:26:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-23T17:26:36,636 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58605, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:26:36,637 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:26:36,637 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:26:36,638 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing 28218a36ba9ccc9cb78380286e1f08cb, disabling compactions & flushes 2024-11-23T17:26:36,638 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 2024-11-23T17:26:36,638 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 2024-11-23T17:26:36,638 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. after waiting 0 ms 2024-11-23T17:26:36,639 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 
2024-11-23T17:26:36,639 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40201, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:26:36,641 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:26:36,641 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:26:36,641 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 3aa5942783f70257c637cc1abea423ca, disabling compactions & flushes 2024-11-23T17:26:36,642 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:26:36,642 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:26:36,642 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. after waiting 0 ms 2024-11-23T17:26:36,642 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 2024-11-23T17:26:36,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:26:36,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:26:36,649 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:26:36,649 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca. 
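[Editor's note] The same disable/close sequence now repeats for testtb-testExportFileSystemStateWithSplitRegion (pids 38-43). Test teardowns commonly sweep every table matching a name pattern instead of naming each one; a sketch of that idiom (the pattern and helper method are illustrative, not taken from this test):

import java.io.IOException;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public final class CleanupTablesSketch {
  // Drop every table whose name matches the given pattern, e.g.
  // Pattern.compile(".*testExportFileSystemStateWithSplitRegion.*").
  static void dropMatchingTables(Admin admin, Pattern pattern) throws IOException {
    for (TableDescriptor desc : admin.listTableDescriptors(pattern)) {
      TableName table = desc.getTableName();
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
    }
  }
}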
2024-11-23T17:26:36,649 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 3aa5942783f70257c637cc1abea423ca: Waiting for close lock at 1732382796641Running coprocessor pre-close hooks at 1732382796641Disabling compacts and flushes for region at 1732382796641Disabling writes for close at 1732382796642 (+1 ms)Writing region close event to WAL at 1732382796643 (+1 ms)Running coprocessor post-close hooks at 1732382796649 (+6 ms)Closed at 1732382796649 2024-11-23T17:26:36,650 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:26:36,650 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb. 2024-11-23T17:26:36,650 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 28218a36ba9ccc9cb78380286e1f08cb: Waiting for close lock at 1732382796638Running coprocessor pre-close hooks at 1732382796638Disabling compacts and flushes for region at 1732382796638Disabling writes for close at 1732382796638Writing region close event to WAL at 1732382796641 (+3 ms)Running coprocessor post-close hooks at 1732382796650 (+9 ms)Closed at 1732382796650 2024-11-23T17:26:36,655 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=3aa5942783f70257c637cc1abea423ca, regionState=CLOSED 2024-11-23T17:26:36,655 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:26:36,656 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=28218a36ba9ccc9cb78380286e1f08cb, regionState=CLOSED 2024-11-23T17:26:36,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3aa5942783f70257c637cc1abea423ca, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:26:36,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:26:36,659 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 3aa5942783f70257c637cc1abea423ca 2024-11-23T17:26:36,662 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-11-23T17:26:36,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=40 2024-11-23T17:26:36,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 3aa5942783f70257c637cc1abea423ca, server=215bb12e999f,41065,1732382621000 in 178 msec 2024-11-23T17:26:36,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=40, state=SUCCESS, 
hasLock=false; CloseRegionProcedure 28218a36ba9ccc9cb78380286e1f08cb, server=215bb12e999f,45467,1732382620632 in 183 msec 2024-11-23T17:26:36,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=3aa5942783f70257c637cc1abea423ca, UNASSIGN in 202 msec 2024-11-23T17:26:36,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=39 2024-11-23T17:26:36,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=28218a36ba9ccc9cb78380286e1f08cb, UNASSIGN in 203 msec 2024-11-23T17:26:36,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-11-23T17:26:36,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 215 msec 2024-11-23T17:26:36,674 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382796673"}]},"ts":"1732382796673"} 2024-11-23T17:26:36,676 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-23T17:26:36,676 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-23T17:26:36,680 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 239 msec 2024-11-23T17:26:36,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-23T17:26:36,756 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:26:36,757 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,759 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,760 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure 
table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,762 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,764 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:26:36,764 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca 2024-11-23T17:26:36,766 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/recovered.edits] 2024-11-23T17:26:36,766 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/recovered.edits] 2024-11-23T17:26:36,771 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/cf/8c47d000c6bc4a349cd0d4d26b4572fd to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/cf/8c47d000c6bc4a349cd0d4d26b4572fd 2024-11-23T17:26:36,771 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/cf/ae85b8a437be4d00bf70e5a049ed0eaf to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/cf/ae85b8a437be4d00bf70e5a049ed0eaf 2024-11-23T17:26:36,774 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb/recovered.edits/9.seqid 2024-11-23T17:26:36,788 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/28218a36ba9ccc9cb78380286e1f08cb 2024-11-23T17:26:36,788 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca/recovered.edits/9.seqid 2024-11-23T17:26:36,789 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSplitRegion/3aa5942783f70257c637cc1abea423ca 2024-11-23T17:26:36,789 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-11-23T17:26:36,793 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,798 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-23T17:26:36,801 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-23T17:26:36,803 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,803 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-11-23T17:26:36,803 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382796803"}]},"ts":"9223372036854775807"} 2024-11-23T17:26:36,803 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382796803"}]},"ts":"9223372036854775807"} 2024-11-23T17:26:36,806 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:26:36,806 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 28218a36ba9ccc9cb78380286e1f08cb, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732382637834.28218a36ba9ccc9cb78380286e1f08cb.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3aa5942783f70257c637cc1abea423ca, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732382637834.3aa5942783f70257c637cc1abea423ca.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:26:36,807 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
2024-11-23T17:26:36,807 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382796807"}]},"ts":"9223372036854775807"} 2024-11-23T17:26:36,809 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-11-23T17:26:36,810 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,813 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 53 msec 2024-11-23T17:26:36,866 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:26:36,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:26:36,868 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:26:36,868 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-23T17:26:36,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,899 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:36,899 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:36,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:36,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,899 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:36,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-23T17:26:36,900 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,900 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-23T17:26:36,945 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-23T17:26:36,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,950 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-23T17:26:36,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,954 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-23T17:26:36,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:36,983 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=764 (was 715) 
Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:37605 from jenkins.hfs.1 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-952559320_1 at /127.0.0.1:45566 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 98394) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44087 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:41522 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:45586 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:44087 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/215bb12e999f:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-952559320_1 at /127.0.0.1:41496 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) 
app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:37605 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:37605 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:52084 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1385 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) - Thread LEAK? -, OpenFileDescriptor=793 (was 771) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1152 (was 754) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 13) - ProcessCount LEAK? -, AvailableMemoryMB=780 (was 1777) 2024-11-23T17:26:36,984 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=764 is superior to 500 2024-11-23T17:26:37,002 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=764, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=1152, ProcessCount=19, AvailableMemoryMB=779 2024-11-23T17:26:37,002 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=764 is superior to 500 2024-11-23T17:26:37,004 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:26:37,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:26:37,006 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:26:37,006 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:26:37,006 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-11-23T17:26:37,007 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName 
execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:26:37,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T17:26:37,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741907_1083 (size=406) 2024-11-23T17:26:37,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741907_1083 (size=406) 2024-11-23T17:26:37,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741907_1083 (size=406) 2024-11-23T17:26:37,021 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => f7d97637c77920082c0e9e41eceb5590, NAME => 'testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:26:37,021 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 2c9151745ab74b8dedbd24f6a91a594a, NAME => 'testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:26:37,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741908_1084 (size=67) 2024-11-23T17:26:37,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741909_1085 (size=67) 2024-11-23T17:26:37,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741909_1085 (size=67) 2024-11-23T17:26:37,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741908_1084 (size=67) 2024-11-23T17:26:37,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741909_1085 (size=67) 2024-11-23T17:26:37,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741908_1084 (size=67) 2024-11-23T17:26:37,045 DEBUG 
[RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:26:37,045 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing 2c9151745ab74b8dedbd24f6a91a594a, disabling compactions & flushes 2024-11-23T17:26:37,045 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:26:37,045 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:26:37,045 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. after waiting 0 ms 2024-11-23T17:26:37,045 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:26:37,045 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:26:37,045 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for 2c9151745ab74b8dedbd24f6a91a594a: Waiting for close lock at 1732382797045Disabling compacts and flushes for region at 1732382797045Disabling writes for close at 1732382797045Writing region close event to WAL at 1732382797045Closed at 1732382797045 2024-11-23T17:26:37,049 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:26:37,050 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing f7d97637c77920082c0e9e41eceb5590, disabling compactions & flushes 2024-11-23T17:26:37,050 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 2024-11-23T17:26:37,050 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 2024-11-23T17:26:37,050 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. after waiting 0 ms 2024-11-23T17:26:37,050 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 
2024-11-23T17:26:37,050 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 2024-11-23T17:26:37,050 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for f7d97637c77920082c0e9e41eceb5590: Waiting for close lock at 1732382797050Disabling compacts and flushes for region at 1732382797050Disabling writes for close at 1732382797050Writing region close event to WAL at 1732382797050Closed at 1732382797050 2024-11-23T17:26:37,051 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:26:37,052 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732382797051"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382797051"}]},"ts":"1732382797051"} 2024-11-23T17:26:37,052 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732382797051"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382797051"}]},"ts":"1732382797051"} 2024-11-23T17:26:37,055 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:26:37,056 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:26:37,056 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382797056"}]},"ts":"1732382797056"} 2024-11-23T17:26:37,059 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-23T17:26:37,059 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:26:37,061 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:26:37,061 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:26:37,061 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:26:37,061 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:26:37,061 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:26:37,061 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:26:37,061 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:26:37,061 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:26:37,061 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:26:37,061 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 
2024-11-23T17:26:37,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7d97637c77920082c0e9e41eceb5590, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2c9151745ab74b8dedbd24f6a91a594a, ASSIGN}] 2024-11-23T17:26:37,063 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2c9151745ab74b8dedbd24f6a91a594a, ASSIGN 2024-11-23T17:26:37,063 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7d97637c77920082c0e9e41eceb5590, ASSIGN 2024-11-23T17:26:37,064 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2c9151745ab74b8dedbd24f6a91a594a, ASSIGN; state=OFFLINE, location=215bb12e999f,41065,1732382621000; forceNewPlan=false, retain=false 2024-11-23T17:26:37,064 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7d97637c77920082c0e9e41eceb5590, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:26:37,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T17:26:37,215 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:26:37,216 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=2c9151745ab74b8dedbd24f6a91a594a, regionState=OPENING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:26:37,216 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=f7d97637c77920082c0e9e41eceb5590, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:26:37,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2c9151745ab74b8dedbd24f6a91a594a, ASSIGN because future has completed 2024-11-23T17:26:37,221 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:26:37,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7d97637c77920082c0e9e41eceb5590, ASSIGN because future has completed 2024-11-23T17:26:37,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7d97637c77920082c0e9e41eceb5590, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:26:37,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T17:26:37,381 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:26:37,382 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => 2c9151745ab74b8dedbd24f6a91a594a, NAME => 'testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:26:37,383 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. service=AccessControlService 2024-11-23T17:26:37,383 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:26:37,383 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 
2024-11-23T17:26:37,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => f7d97637c77920082c0e9e41eceb5590, NAME => 'testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:26:37,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:26:37,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. service=AccessControlService 2024-11-23T17:26:37,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,384 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:26:37,385 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,385 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:26:37,385 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,385 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,386 INFO [StoreOpener-2c9151745ab74b8dedbd24f6a91a594a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,386 INFO [StoreOpener-f7d97637c77920082c0e9e41eceb5590-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,388 INFO [StoreOpener-2c9151745ab74b8dedbd24f6a91a594a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2c9151745ab74b8dedbd24f6a91a594a columnFamilyName cf 2024-11-23T17:26:37,388 INFO [StoreOpener-f7d97637c77920082c0e9e41eceb5590-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f7d97637c77920082c0e9e41eceb5590 columnFamilyName cf 2024-11-23T17:26:37,388 DEBUG [StoreOpener-2c9151745ab74b8dedbd24f6a91a594a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:26:37,388 DEBUG [StoreOpener-f7d97637c77920082c0e9e41eceb5590-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:26:37,389 INFO [StoreOpener-2c9151745ab74b8dedbd24f6a91a594a-1 {}] regionserver.HStore(327): Store=2c9151745ab74b8dedbd24f6a91a594a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:26:37,389 INFO [StoreOpener-f7d97637c77920082c0e9e41eceb5590-1 {}] regionserver.HStore(327): Store=f7d97637c77920082c0e9e41eceb5590/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:26:37,389 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,389 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,390 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,390 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,390 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,390 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,391 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,391 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,391 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,391 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,393 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 
{event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,397 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,400 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:26:37,401 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened f7d97637c77920082c0e9e41eceb5590; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69076403, jitterRate=0.029318615794181824}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:26:37,401 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,402 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for f7d97637c77920082c0e9e41eceb5590: Running coprocessor pre-open hook at 1732382797385Writing region info on filesystem at 1732382797385Initializing all the Stores at 1732382797386 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382797386Cleaning up temporary data from old regions at 1732382797391 (+5 ms)Running coprocessor post-open hooks at 1732382797402 (+11 ms)Region opened successfully at 1732382797402 2024-11-23T17:26:37,404 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590., pid=49, masterSystemTime=1732382797378 2024-11-23T17:26:37,407 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=f7d97637c77920082c0e9e41eceb5590, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:26:37,410 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 2024-11-23T17:26:37,411 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 
2024-11-23T17:26:37,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure f7d97637c77920082c0e9e41eceb5590, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:26:37,413 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:26:37,414 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened 2c9151745ab74b8dedbd24f6a91a594a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67866491, jitterRate=0.011289522051811218}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:26:37,414 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,414 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for 2c9151745ab74b8dedbd24f6a91a594a: Running coprocessor pre-open hook at 1732382797385Writing region info on filesystem at 1732382797385Initializing all the Stores at 1732382797386 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382797386Cleaning up temporary data from old regions at 1732382797391 (+5 ms)Running coprocessor post-open hooks at 1732382797414 (+23 ms)Region opened successfully at 1732382797414 2024-11-23T17:26:37,417 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a., pid=48, masterSystemTime=1732382797375 2024-11-23T17:26:37,420 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:26:37,420 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 
2024-11-23T17:26:37,427 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=2c9151745ab74b8dedbd24f6a91a594a, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:26:37,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-11-23T17:26:37,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure f7d97637c77920082c0e9e41eceb5590, server=215bb12e999f,44667,1732382620918 in 194 msec 2024-11-23T17:26:37,430 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:26:37,432 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7d97637c77920082c0e9e41eceb5590, ASSIGN in 367 msec 2024-11-23T17:26:37,435 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-11-23T17:26:37,436 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a, server=215bb12e999f,41065,1732382621000 in 211 msec 2024-11-23T17:26:37,438 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-11-23T17:26:37,438 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2c9151745ab74b8dedbd24f6a91a594a, ASSIGN in 374 msec 2024-11-23T17:26:37,439 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:26:37,439 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382797439"}]},"ts":"1732382797439"} 2024-11-23T17:26:37,442 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-23T17:26:37,443 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:26:37,444 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-23T17:26:37,448 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-23T17:26:37,497 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:37,497 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:37,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:37,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:26:37,508 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T17:26:37,508 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T17:26:37,509 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T17:26:37,509 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-23T17:26:37,511 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 504 msec 2024-11-23T17:26:37,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-23T17:26:37,636 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T17:26:37,636 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-11-23T17:26:37,636 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:26:37,638 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50634, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:26:37,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-11-23T17:26:37,643 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:26:37,643 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 
2024-11-23T17:26:37,644 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:26:37,647 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T17:26:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382797647 (current time:1732382797647). 2024-11-23T17:26:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:26:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-23T17:26:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:26:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1394c86b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:26:37,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:26:37,649 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:26:37,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:26:37,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:26:37,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@124f409f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:37,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:26:37,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:26:37,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:37,651 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:43144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:26:37,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72d40289, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:26:37,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:26:37,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:26:37,656 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50636, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:26:37,657 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:26:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:26:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:37,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:37,658 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:26:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ed6e299, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:26:37,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:26:37,660 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:26:37,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:26:37,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:26:37,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b9752f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:37,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:26:37,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:26:37,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:37,662 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43162, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:26:37,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3221c2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:37,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:26:37,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:26:37,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:26:37,668 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:26:37,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:26:37,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:26:37,672 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35254, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:26:37,674 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:26:37,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:26:37,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:37,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:37,674 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:26:37,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-23T17:26:37,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:26:37,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T17:26:37,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-23T17:26:37,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-23T17:26:37,678 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:26:37,680 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:26:37,682 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:26:37,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741910_1086 (size=167) 2024-11-23T17:26:37,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741910_1086 (size=167) 2024-11-23T17:26:37,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741910_1086 (size=167) 2024-11-23T17:26:37,701 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:26:37,701 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7d97637c77920082c0e9e41eceb5590}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a}] 2024-11-23T17:26:37,702 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,703 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-23T17:26:37,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-11-23T17:26:37,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-11-23T17:26:37,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for f7d97637c77920082c0e9e41eceb5590: 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for 2c9151745ab74b8dedbd24f6a91a594a: 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. for emptySnaptb0-testExportWithTargetName completed. 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. for emptySnaptb0-testExportWithTargetName completed. 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:26:37,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:26:37,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741912_1088 (size=70) 2024-11-23T17:26:37,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741912_1088 (size=70) 2024-11-23T17:26:37,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741911_1087 (size=70) 2024-11-23T17:26:37,870 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:26:37,870 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 
2024-11-23T17:26:37,870 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-23T17:26:37,870 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-11-23T17:26:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-11-23T17:26:37,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,871 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:37,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-11-23T17:26:37,871 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741911_1087 (size=70) 2024-11-23T17:26:37,871 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:37,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741911_1087 (size=70) 2024-11-23T17:26:37,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741912_1088 (size=70) 2024-11-23T17:26:37,875 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a in 173 msec 2024-11-23T17:26:37,877 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-11-23T17:26:37,877 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f7d97637c77920082c0e9e41eceb5590 in 173 msec 2024-11-23T17:26:37,877 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:26:37,878 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:26:37,879 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:26:37,879 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-23T17:26:37,880 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-23T17:26:37,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741913_1089 (size=549) 2024-11-23T17:26:37,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741913_1089 (size=549) 2024-11-23T17:26:37,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741913_1089 (size=549) 2024-11-23T17:26:37,895 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:26:37,902 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:26:37,903 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-23T17:26:37,905 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:26:37,905 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-23T17:26:37,915 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 230 msec 2024-11-23T17:26:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-23T17:26:37,996 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName 
completed 2024-11-23T17:26:38,002 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='0ac387fc7007b6250477b906b6b2b6863', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:26:38,003 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='10f3d6478b21c860748830a48c39256cc', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:26:38,004 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='2c0538e17721799d0fdf673a9c23bdaf9', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:26:38,006 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='3855f339bc88e4aff65770b817e71b6d2', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:26:38,007 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='400661f77b9c8ebbe5c9f58884bea7e85', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:26:38,012 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35270, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:26:38,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:26:38,016 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41065 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:26:38,020 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:26:38,024 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-23T17:26:38,024 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 
2024-11-23T17:26:38,025 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:26:38,026 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-23T17:26:38,027 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:26:38,035 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:26:38,045 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-23T17:26:38,052 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T17:26:38,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382798052 (current time:1732382798052). 2024-11-23T17:26:38,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:26:38,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-23T17:26:38,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:26:38,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29bc30de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:38,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:26:38,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:26:38,055 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:26:38,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:26:38,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:26:38,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1071cae4, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:38,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:26:38,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:26:38,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:38,057 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43194, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:26:38,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6be41338, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:38,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:26:38,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:26:38,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:26:38,065 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50652, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:26:38,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 
2024-11-23T17:26:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:26:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:38,068 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:26:38,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d257c1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:38,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:26:38,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:26:38,079 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:26:38,079 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:26:38,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:26:38,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1574dc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:38,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:26:38,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:26:38,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:38,081 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43216, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:26:38,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c349d0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:26:38,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:26:38,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:26:38,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:26:38,086 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50662, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:26:38,089 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:26:38,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:26:38,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:26:38,091 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35276, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:26:38,093 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 
2024-11-23T17:26:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:26:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:26:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-23T17:26:38,095 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:26:38,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:26:38,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-23T17:26:38,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-23T17:26:38,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T17:26:38,098 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:26:38,100 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:26:38,104 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:26:38,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741914_1090 (size=162) 2024-11-23T17:26:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741914_1090 (size=162) 2024-11-23T17:26:38,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741914_1090 (size=162) 2024-11-23T17:26:38,161 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:26:38,161 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7d97637c77920082c0e9e41eceb5590}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a}] 2024-11-23T17:26:38,163 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:38,163 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:38,206 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T17:26:38,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-11-23T17:26:38,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-11-23T17:26:38,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:26:38,320 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 2024-11-23T17:26:38,321 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing 2c9151745ab74b8dedbd24f6a91a594a 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-11-23T17:26:38,321 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing f7d97637c77920082c0e9e41eceb5590 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-11-23T17:26:38,345 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/.tmp/cf/08dbd7170a084d088a140101149931bc is 71, key is 00a136e6cd6110dc65a4a1ed88bc536a/cf:q/1732382798012/Put/seqid=0 2024-11-23T17:26:38,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/.tmp/cf/50af8a1d2a8a482285ec9a02cd2e3a77 is 71, key is 1fe28ee0557c7a4d70cc7a41ef4c4566/cf:q/1732382798016/Put/seqid=0 2024-11-23T17:26:38,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741915_1091 (size=5490) 2024-11-23T17:26:38,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741915_1091 (size=5490) 2024-11-23T17:26:38,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741915_1091 (size=5490) 2024-11-23T17:26:38,382 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=400 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/.tmp/cf/08dbd7170a084d088a140101149931bc 
2024-11-23T17:26:38,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/.tmp/cf/08dbd7170a084d088a140101149931bc as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/cf/08dbd7170a084d088a140101149931bc 2024-11-23T17:26:38,400 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/cf/08dbd7170a084d088a140101149931bc, entries=6, sequenceid=6, filesize=5.4 K 2024-11-23T17:26:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741916_1092 (size=8122) 2024-11-23T17:26:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741916_1092 (size=8122) 2024-11-23T17:26:38,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741916_1092 (size=8122) 2024-11-23T17:26:38,402 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for f7d97637c77920082c0e9e41eceb5590 in 80ms, sequenceid=6, compaction requested=false 2024-11-23T17:26:38,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for f7d97637c77920082c0e9e41eceb5590: 2024-11-23T17:26:38,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. for snaptb0-testExportWithTargetName completed. 2024-11-23T17:26:38,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-23T17:26:38,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:26:38,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/cf/08dbd7170a084d088a140101149931bc] hfiles 2024-11-23T17:26:38,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/cf/08dbd7170a084d088a140101149931bc for snapshot=snaptb0-testExportWithTargetName 2024-11-23T17:26:38,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741917_1093 (size=109) 2024-11-23T17:26:38,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741917_1093 (size=109) 2024-11-23T17:26:38,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741917_1093 (size=109) 2024-11-23T17:26:38,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 
2024-11-23T17:26:38,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-23T17:26:38,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-11-23T17:26:38,412 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:38,413 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:26:38,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T17:26:38,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f7d97637c77920082c0e9e41eceb5590 in 254 msec 2024-11-23T17:26:38,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T17:26:38,802 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.87 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/.tmp/cf/50af8a1d2a8a482285ec9a02cd2e3a77 2024-11-23T17:26:38,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/.tmp/cf/50af8a1d2a8a482285ec9a02cd2e3a77 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/cf/50af8a1d2a8a482285ec9a02cd2e3a77 2024-11-23T17:26:38,821 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/cf/50af8a1d2a8a482285ec9a02cd2e3a77, entries=44, sequenceid=6, filesize=7.9 K 2024-11-23T17:26:38,822 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 2c9151745ab74b8dedbd24f6a91a594a in 501ms, sequenceid=6, compaction requested=false 2024-11-23T17:26:38,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for 2c9151745ab74b8dedbd24f6a91a594a: 2024-11-23T17:26:38,822 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. for snaptb0-testExportWithTargetName completed. 2024-11-23T17:26:38,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-23T17:26:38,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:26:38,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/cf/50af8a1d2a8a482285ec9a02cd2e3a77] hfiles 2024-11-23T17:26:38,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/cf/50af8a1d2a8a482285ec9a02cd2e3a77 for snapshot=snaptb0-testExportWithTargetName 2024-11-23T17:26:38,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741918_1094 (size=109) 2024-11-23T17:26:38,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741918_1094 (size=109) 2024-11-23T17:26:38,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741918_1094 (size=109) 2024-11-23T17:26:38,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 
2024-11-23T17:26:38,837 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-11-23T17:26:38,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-11-23T17:26:38,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:38,838 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:26:38,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-11-23T17:26:38,843 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:26:38,843 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a in 679 msec 2024-11-23T17:26:38,844 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:26:38,845 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:26:38,848 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-23T17:26:38,849 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-23T17:26:38,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741919_1095 (size=627) 2024-11-23T17:26:38,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741919_1095 (size=627) 2024-11-23T17:26:38,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741919_1095 (size=627) 2024-11-23T17:26:38,874 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:26:38,881 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:26:38,881 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-23T17:26:38,883 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:26:38,883 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-23T17:26:38,885 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 788 msec 2024-11-23T17:26:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-23T17:26:39,237 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T17:26:39,237 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237 2024-11-23T17:26:39,237 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37605, tgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237, rawTgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:26:39,271 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:26:39,271 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-23T17:26:39,273 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status 
and integrity. 2024-11-23T17:26:39,277 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-23T17:26:39,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741921_1097 (size=627) 2024-11-23T17:26:39,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741920_1096 (size=162) 2024-11-23T17:26:39,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741921_1097 (size=627) 2024-11-23T17:26:39,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741920_1096 (size=162) 2024-11-23T17:26:39,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741921_1097 (size=627) 2024-11-23T17:26:39,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741920_1096 (size=162) 2024-11-23T17:26:39,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741922_1098 (size=154) 2024-11-23T17:26:39,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741922_1098 (size=154) 2024-11-23T17:26:39,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741922_1098 (size=154) 2024-11-23T17:26:39,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:39,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:39,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:39,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-23T17:26:39,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-23T17:26:39,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 
2024-11-23T17:26:39,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-23T17:26:40,365 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0001_000001 (auth:SIMPLE) from 127.0.0.1:47876 2024-11-23T17:26:40,388 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0001/container_1732382632246_0001_01_000001/launch_container.sh] 2024-11-23T17:26:40,388 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0001/container_1732382632246_0001_01_000001/container_tokens] 2024-11-23T17:26:40,388 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0001/container_1732382632246_0001_01_000001/sysfs] 2024-11-23T17:26:40,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-6354526516501977466.jar 2024-11-23T17:26:40,406 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:40,407 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:40,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-13238016887216323812.jar 2024-11-23T17:26:40,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:40,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:40,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For 
class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:40,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:40,493 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:40,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:26:40,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:26:40,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:26:40,494 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:26:40,495 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:26:40,495 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:26:40,495 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:26:40,495 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:26:40,495 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:26:40,496 
DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:26:40,496 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:26:40,496 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:26:40,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:26:40,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:26:40,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:26:40,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:26:40,497 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:26:40,498 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:26:40,498 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:26:40,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741923_1099 (size=6424742) 2024-11-23T17:26:40,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741923_1099 (size=6424742) 2024-11-23T17:26:40,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to 
blk_1073741923_1099 (size=6424742) 2024-11-23T17:26:40,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741924_1100 (size=131440) 2024-11-23T17:26:40,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741924_1100 (size=131440) 2024-11-23T17:26:40,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741924_1100 (size=131440) 2024-11-23T17:26:40,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741925_1101 (size=440957) 2024-11-23T17:26:40,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741925_1101 (size=440957) 2024-11-23T17:26:40,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741925_1101 (size=440957) 2024-11-23T17:26:40,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741926_1102 (size=4188619) 2024-11-23T17:26:40,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741926_1102 (size=4188619) 2024-11-23T17:26:40,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741926_1102 (size=4188619) 2024-11-23T17:26:40,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741927_1103 (size=1323991) 2024-11-23T17:26:40,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741927_1103 (size=1323991) 2024-11-23T17:26:40,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741927_1103 (size=1323991) 2024-11-23T17:26:41,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741928_1104 (size=903733) 2024-11-23T17:26:41,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741928_1104 (size=903733) 2024-11-23T17:26:41,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741928_1104 (size=903733) 2024-11-23T17:26:41,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741929_1105 (size=8360083) 2024-11-23T17:26:41,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741929_1105 (size=8360083) 2024-11-23T17:26:41,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741929_1105 (size=8360083) 2024-11-23T17:26:41,697 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:26:41,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45743 is added to blk_1073741930_1106 (size=1877034) 2024-11-23T17:26:41,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741930_1106 (size=1877034) 2024-11-23T17:26:41,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741930_1106 (size=1877034) 2024-11-23T17:26:41,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741931_1107 (size=77835) 2024-11-23T17:26:41,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741931_1107 (size=77835) 2024-11-23T17:26:41,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741931_1107 (size=77835) 2024-11-23T17:26:41,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741932_1108 (size=30949) 2024-11-23T17:26:41,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741932_1108 (size=30949) 2024-11-23T17:26:41,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741932_1108 (size=30949) 2024-11-23T17:26:42,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741933_1109 (size=1597347) 2024-11-23T17:26:42,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741933_1109 (size=1597347) 2024-11-23T17:26:42,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741933_1109 (size=1597347) 2024-11-23T17:26:42,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741934_1110 (size=4695811) 2024-11-23T17:26:42,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741934_1110 (size=4695811) 2024-11-23T17:26:42,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741934_1110 (size=4695811) 2024-11-23T17:26:42,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741935_1111 (size=232957) 2024-11-23T17:26:42,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741935_1111 (size=232957) 2024-11-23T17:26:42,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741935_1111 (size=232957) 2024-11-23T17:26:42,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741936_1112 (size=127628) 2024-11-23T17:26:42,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741936_1112 (size=127628) 2024-11-23T17:26:42,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45743 is added to blk_1073741936_1112 (size=127628) 2024-11-23T17:26:42,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741937_1113 (size=20406) 2024-11-23T17:26:42,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741937_1113 (size=20406) 2024-11-23T17:26:42,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741937_1113 (size=20406) 2024-11-23T17:26:42,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741938_1114 (size=5175431) 2024-11-23T17:26:42,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741938_1114 (size=5175431) 2024-11-23T17:26:42,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741938_1114 (size=5175431) 2024-11-23T17:26:42,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741939_1115 (size=217634) 2024-11-23T17:26:42,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741939_1115 (size=217634) 2024-11-23T17:26:42,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741939_1115 (size=217634) 2024-11-23T17:26:42,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741940_1116 (size=1832290) 2024-11-23T17:26:42,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741940_1116 (size=1832290) 2024-11-23T17:26:42,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741940_1116 (size=1832290) 2024-11-23T17:26:42,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741941_1117 (size=322274) 2024-11-23T17:26:42,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741941_1117 (size=322274) 2024-11-23T17:26:42,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741941_1117 (size=322274) 2024-11-23T17:26:42,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741942_1118 (size=503880) 2024-11-23T17:26:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741942_1118 (size=503880) 2024-11-23T17:26:42,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741942_1118 (size=503880) 2024-11-23T17:26:42,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741943_1119 (size=29229) 2024-11-23T17:26:42,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741943_1119 (size=29229) 2024-11-23T17:26:42,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741943_1119 (size=29229) 2024-11-23T17:26:42,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741944_1120 (size=24096) 2024-11-23T17:26:42,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741944_1120 (size=24096) 2024-11-23T17:26:42,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741944_1120 (size=24096) 2024-11-23T17:26:43,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741945_1121 (size=111872) 2024-11-23T17:26:43,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741945_1121 (size=111872) 2024-11-23T17:26:43,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741945_1121 (size=111872) 2024-11-23T17:26:43,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741946_1122 (size=45609) 2024-11-23T17:26:43,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741946_1122 (size=45609) 2024-11-23T17:26:43,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741946_1122 (size=45609) 2024-11-23T17:26:43,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741947_1123 (size=136454) 2024-11-23T17:26:43,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741947_1123 (size=136454) 2024-11-23T17:26:43,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741947_1123 (size=136454) 2024-11-23T17:26:43,535 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-23T17:26:43,544 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-23T17:26:43,549 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=7.9 K 2024-11-23T17:26:43,549 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.4 K 2024-11-23T17:26:43,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741948_1124 (size=445) 2024-11-23T17:26:43,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741948_1124 (size=445) 2024-11-23T17:26:43,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741948_1124 (size=445) 2024-11-23T17:26:43,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741949_1125 (size=21) 2024-11-23T17:26:43,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741949_1125 (size=21) 2024-11-23T17:26:43,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741949_1125 (size=21) 2024-11-23T17:26:43,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741950_1126 (size=304005) 2024-11-23T17:26:43,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741950_1126 (size=304005) 2024-11-23T17:26:43,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741950_1126 (size=304005) 2024-11-23T17:26:43,643 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:26:43,644 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:26:44,222 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0002_000001 (auth:SIMPLE) from 127.0.0.1:40162 2024-11-23T17:26:45,412 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 2c9151745ab74b8dedbd24f6a91a594a changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:26:45,413 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region f7d97637c77920082c0e9e41eceb5590 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:26:56,224 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0002_000001 (auth:SIMPLE) from 127.0.0.1:48776 2024-11-23T17:26:57,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741951_1127 (size=349703) 2024-11-23T17:26:57,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741951_1127 (size=349703) 2024-11-23T17:26:57,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741951_1127 (size=349703) 2024-11-23T17:26:58,969 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0002_000001 (auth:SIMPLE) from 127.0.0.1:59108 2024-11-23T17:26:58,971 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0002_000001 (auth:SIMPLE) from 127.0.0.1:51664 2024-11-23T17:27:08,089 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:27:11,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741952_1128 (size=5490) 2024-11-23T17:27:11,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741952_1128 (size=5490) 2024-11-23T17:27:11,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741952_1128 (size=5490) 2024-11-23T17:27:11,867 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0002/container_1732382632246_0002_01_000003/launch_container.sh] 2024-11-23T17:27:11,867 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0002/container_1732382632246_0002_01_000003/container_tokens] 2024-11-23T17:27:11,867 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0002/container_1732382632246_0002_01_000003/sysfs] 2024-11-23T17:27:14,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741954_1130 (size=8122) 2024-11-23T17:27:14,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741954_1130 (size=8122) 2024-11-23T17:27:14,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741954_1130 (size=8122) 2024-11-23T17:27:14,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741953_1129 (size=22159) 2024-11-23T17:27:14,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741953_1129 (size=22159) 2024-11-23T17:27:14,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741953_1129 (size=22159) 2024-11-23T17:27:14,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741955_1131 (size=465) 2024-11-23T17:27:14,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741955_1131 (size=465) 2024-11-23T17:27:14,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741955_1131 (size=465) 2024-11-23T17:27:15,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741956_1132 (size=22159) 
2024-11-23T17:27:15,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741956_1132 (size=22159) 2024-11-23T17:27:15,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741956_1132 (size=22159) 2024-11-23T17:27:15,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741957_1133 (size=349703) 2024-11-23T17:27:15,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741957_1133 (size=349703) 2024-11-23T17:27:15,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741957_1133 (size=349703) 2024-11-23T17:27:17,160 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:27:17,161 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:27:17,167 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-11-23T17:27:17,167 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:27:17,168 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:27:17,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-23T17:27:17,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-23T17:27:17,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-23T17:27:17,168 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237/.hbase-snapshot/testExportWithTargetName 2024-11-23T17:27:17,169 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-23T17:27:17,169 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382799237/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-23T17:27:17,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] 
master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-23T17:27:17,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:27:17,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-23T17:27:17,183 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382837183"}]},"ts":"1732382837183"} 2024-11-23T17:27:17,185 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-23T17:27:17,185 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-23T17:27:17,186 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-23T17:27:17,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7d97637c77920082c0e9e41eceb5590, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2c9151745ab74b8dedbd24f6a91a594a, UNASSIGN}] 2024-11-23T17:27:17,189 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7d97637c77920082c0e9e41eceb5590, UNASSIGN 2024-11-23T17:27:17,189 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2c9151745ab74b8dedbd24f6a91a594a, UNASSIGN 2024-11-23T17:27:17,190 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=2c9151745ab74b8dedbd24f6a91a594a, regionState=CLOSING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:27:17,190 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=f7d97637c77920082c0e9e41eceb5590, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:27:17,192 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2c9151745ab74b8dedbd24f6a91a594a, UNASSIGN because future has completed 2024-11-23T17:27:17,192 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:27:17,192 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 
2c9151745ab74b8dedbd24f6a91a594a, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:27:17,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7d97637c77920082c0e9e41eceb5590, UNASSIGN because future has completed 2024-11-23T17:27:17,193 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:27:17,193 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure f7d97637c77920082c0e9e41eceb5590, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:27:17,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-23T17:27:17,345 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(122): Close 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:27:17,345 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:27:17,345 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing 2c9151745ab74b8dedbd24f6a91a594a, disabling compactions & flushes 2024-11-23T17:27:17,345 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:27:17,345 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:27:17,345 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. after waiting 0 ms 2024-11-23T17:27:17,345 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:27:17,346 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:27:17,346 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:27:17,346 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing f7d97637c77920082c0e9e41eceb5590, disabling compactions & flushes 2024-11-23T17:27:17,346 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 
2024-11-23T17:27:17,346 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 2024-11-23T17:27:17,346 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. after waiting 0 ms 2024-11-23T17:27:17,346 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 2024-11-23T17:27:17,349 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:27:17,349 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:27:17,349 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:27:17,349 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:27:17,350 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a. 2024-11-23T17:27:17,350 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590. 
2024-11-23T17:27:17,350 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for 2c9151745ab74b8dedbd24f6a91a594a: Waiting for close lock at 1732382837345Running coprocessor pre-close hooks at 1732382837345Disabling compacts and flushes for region at 1732382837345Disabling writes for close at 1732382837345Writing region close event to WAL at 1732382837346 (+1 ms)Running coprocessor post-close hooks at 1732382837349 (+3 ms)Closed at 1732382837350 (+1 ms) 2024-11-23T17:27:17,350 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for f7d97637c77920082c0e9e41eceb5590: Waiting for close lock at 1732382837346Running coprocessor pre-close hooks at 1732382837346Disabling compacts and flushes for region at 1732382837346Disabling writes for close at 1732382837346Writing region close event to WAL at 1732382837346Running coprocessor post-close hooks at 1732382837349 (+3 ms)Closed at 1732382837350 (+1 ms) 2024-11-23T17:27:17,351 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:27:17,352 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=f7d97637c77920082c0e9e41eceb5590, regionState=CLOSED 2024-11-23T17:27:17,352 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed 2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:27:17,353 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=2c9151745ab74b8dedbd24f6a91a594a, regionState=CLOSED 2024-11-23T17:27:17,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure f7d97637c77920082c0e9e41eceb5590, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:27:17,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:27:17,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=58 2024-11-23T17:27:17,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure f7d97637c77920082c0e9e41eceb5590, server=215bb12e999f,44667,1732382620918 in 162 msec 2024-11-23T17:27:17,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=59 2024-11-23T17:27:17,357 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f7d97637c77920082c0e9e41eceb5590, UNASSIGN in 168 msec 2024-11-23T17:27:17,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure 2c9151745ab74b8dedbd24f6a91a594a, server=215bb12e999f,41065,1732382621000 in 164 msec 2024-11-23T17:27:17,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=59, resume processing ppid=57 2024-11-23T17:27:17,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=2c9151745ab74b8dedbd24f6a91a594a, UNASSIGN in 169 msec 2024-11-23T17:27:17,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-11-23T17:27:17,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 173 msec 2024-11-23T17:27:17,362 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382837362"}]},"ts":"1732382837362"} 2024-11-23T17:27:17,363 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-23T17:27:17,363 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-23T17:27:17,365 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 184 msec 2024-11-23T17:27:17,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-23T17:27:17,497 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T17:27:17,497 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-23T17:27:17,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:27:17,499 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:27:17,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-23T17:27:17,500 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:27:17,503 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-23T17:27:17,505 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:27:17,506 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:27:17,508 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/recovered.edits] 2024-11-23T17:27:17,508 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/recovered.edits] 2024-11-23T17:27:17,512 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/cf/50af8a1d2a8a482285ec9a02cd2e3a77 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/cf/50af8a1d2a8a482285ec9a02cd2e3a77 2024-11-23T17:27:17,512 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/cf/08dbd7170a084d088a140101149931bc to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/cf/08dbd7170a084d088a140101149931bc 2024-11-23T17:27:17,514 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590/recovered.edits/9.seqid 2024-11-23T17:27:17,514 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a/recovered.edits/9.seqid 2024-11-23T17:27:17,515 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/f7d97637c77920082c0e9e41eceb5590 2024-11-23T17:27:17,515 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithTargetName/2c9151745ab74b8dedbd24f6a91a594a 2024-11-23T17:27:17,515 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-23T17:27:17,517 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:27:17,520 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-23T17:27:17,522 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-23T17:27:17,523 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:27:17,523 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-11-23T17:27:17,523 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382837523"}]},"ts":"9223372036854775807"} 2024-11-23T17:27:17,523 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382837523"}]},"ts":"9223372036854775807"} 2024-11-23T17:27:17,525 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:27:17,525 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => f7d97637c77920082c0e9e41eceb5590, NAME => 'testtb-testExportWithTargetName,,1732382797003.f7d97637c77920082c0e9e41eceb5590.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2c9151745ab74b8dedbd24f6a91a594a, NAME => 'testtb-testExportWithTargetName,1,1732382797003.2c9151745ab74b8dedbd24f6a91a594a.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:27:17,525 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
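The entries above trace the server side of a client-initiated disable and delete of testtb-testExportWithTargetName (DisableTableProcedure, then DeleteTableProcedure archiving the region HFiles and clearing hbase:meta), and the entries that follow show the matching snapshots being dropped. A minimal client-side sketch of the equivalent calls through the public Admin API is below; the table and snapshot names are taken from the log, while the connection setup is an assumption and not part of the logged test code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExportTestTable {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
      // Mirror the logged sequence: DISABLE the table, then DELETE it.
      if (admin.tableExists(tn)) {
        if (!admin.isTableDisabled(tn)) {
          admin.disableTable(tn);
        }
        admin.deleteTable(tn);
      }
      // Drop the snapshots named in the subsequent SnapshotManager entries.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}

Each call blocks until the corresponding master procedure finishes, which is why the log shows the client repeatedly asking "Checking to see if procedure is done" before reporting the operation completed.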
2024-11-23T17:27:17,526 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382837525"}]},"ts":"9223372036854775807"} 2024-11-23T17:27:17,528 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-23T17:27:17,529 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-23T17:27:17,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 32 msec 2024-11-23T17:27:17,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:27:17,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:27:17,824 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:27:17,824 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:27:17,825 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T17:27:17,825 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T17:27:17,825 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T17:27:17,825 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-23T17:27:18,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:27:18,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:27:18,038 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:27:18,038 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-23T17:27:18,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:18,038 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:18,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:18,038 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:18,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-11-23T17:27:18,040 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-23T17:27:18,040 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-23T17:27:18,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-23T17:27:18,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-23T17:27:18,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-23T17:27:18,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-23T17:27:18,193 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=783 (was 764) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 102566) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:35793 from 
jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:53588 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:51434 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2039 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:49462 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35793 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36999 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_140689743_1 at /127.0.0.1:49440 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1 java.base@17.0.11/java.io.UnixFileSystem.delete0(Native Method) java.base@17.0.11/java.io.UnixFileSystem.delete(UnixFileSystem.java:316) java.base@17.0.11/java.io.File.delete(File.java:1073) 
app//org.apache.hadoop.fs.FileUtil.deleteImpl(FileUtil.java:246) app//org.apache.hadoop.fs.FileUtil.fullyDelete(FileUtil.java:193) app//org.apache.hadoop.fs.FileUtil.fullyDelete(FileUtil.java:170) app//org.apache.hadoop.hdfs.server.datanode.FileIoProvider.fullyDelete(FileIoProvider.java:515) app//org.apache.hadoop.hdfs.server.datanode.LocalReplica.deleteBlockData(LocalReplica.java:277) app//org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService$ReplicaFileDeleteTask.deleteFiles(FsDatasetAsyncDiskService.java:302) app//org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService$ReplicaFileDeleteTask.run(FsDatasetAsyncDiskService.java:353) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_140689743_1 at /127.0.0.1:53578 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2 java.base@17.0.11/java.io.UnixFileSystem.delete0(Native Method) java.base@17.0.11/java.io.UnixFileSystem.delete(UnixFileSystem.java:316) java.base@17.0.11/java.io.File.delete(File.java:1073) app//org.apache.hadoop.fs.FileUtil.deleteImpl(FileUtil.java:246) app//org.apache.hadoop.fs.FileUtil.fullyDelete(FileUtil.java:193) app//org.apache.hadoop.fs.FileUtil.fullyDelete(FileUtil.java:170) app//org.apache.hadoop.hdfs.server.datanode.FileIoProvider.fullyDelete(FileIoProvider.java:515) app//org.apache.hadoop.hdfs.server.datanode.LocalReplica.deleteBlockData(LocalReplica.java:277) app//org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService$ReplicaFileDeleteTask.deleteFiles(FsDatasetAsyncDiskService.java:302) app//org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService$ReplicaFileDeleteTask.run(FsDatasetAsyncDiskService.java:353) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=807 (was 793) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1339 (was 1152) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=2354 (was 779) - AvailableMemoryMB LEAK? 
- 2024-11-23T17:27:18,193 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-11-23T17:27:18,209 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=783, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=1339, ProcessCount=20, AvailableMemoryMB=2352 2024-11-23T17:27:18,209 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=783 is superior to 500 2024-11-23T17:27:18,211 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:27:18,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:27:18,212 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:27:18,212 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:27:18,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-11-23T17:27:18,213 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:27:18,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T17:27:18,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741958_1134 (size=404) 2024-11-23T17:27:18,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741958_1134 (size=404) 2024-11-23T17:27:18,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741958_1134 (size=404) 2024-11-23T17:27:18,222 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => efe67ed8d86346ec0aee1231aa1ea894, NAME => 'testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:18,222 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => da60fa86fdbf8f39601828babeb53202, NAME => 'testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:18,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741960_1136 (size=65) 2024-11-23T17:27:18,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741960_1136 (size=65) 2024-11-23T17:27:18,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741960_1136 (size=65) 2024-11-23T17:27:18,233 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:18,233 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing efe67ed8d86346ec0aee1231aa1ea894, disabling compactions & flushes 2024-11-23T17:27:18,233 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 2024-11-23T17:27:18,233 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 2024-11-23T17:27:18,233 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. after waiting 0 ms 2024-11-23T17:27:18,233 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 2024-11-23T17:27:18,233 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 
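The create request logged above specifies a single 'cf' family (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536') and produces two regions split at row key '1', which CreateTableProcedure then lays out on the filesystem. A rough client-side equivalent using the public descriptor builders is sketched below; only the attributes that differ from defaults are set, and the exact mapping of the shell-style attributes to builder calls is an assumption, not the test's actual code.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateResetTtlTestTable {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
          .setRegionReplication(1)                 // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                   // VERSIONS => '1'
              .setBlocksize(65536)                 // BLOCKSIZE => '65536'
              .setBloomFilterType(BloomType.ROW)   // BLOOMFILTER => 'ROW'
              .build())
          .build();
      // A single split key "1" yields the two regions seen in the log:
      // ['', '1') and ['1', '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}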
2024-11-23T17:27:18,233 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for efe67ed8d86346ec0aee1231aa1ea894: Waiting for close lock at 1732382838233Disabling compacts and flushes for region at 1732382838233Disabling writes for close at 1732382838233Writing region close event to WAL at 1732382838233Closed at 1732382838233 2024-11-23T17:27:18,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741959_1135 (size=65) 2024-11-23T17:27:18,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741959_1135 (size=65) 2024-11-23T17:27:18,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741959_1135 (size=65) 2024-11-23T17:27:18,234 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:18,234 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing da60fa86fdbf8f39601828babeb53202, disabling compactions & flushes 2024-11-23T17:27:18,234 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:18,234 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:18,235 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. after waiting 0 ms 2024-11-23T17:27:18,235 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:18,235 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 
2024-11-23T17:27:18,235 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for da60fa86fdbf8f39601828babeb53202: Waiting for close lock at 1732382838234Disabling compacts and flushes for region at 1732382838234Disabling writes for close at 1732382838235 (+1 ms)Writing region close event to WAL at 1732382838235Closed at 1732382838235 2024-11-23T17:27:18,236 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:27:18,236 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732382838236"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382838236"}]},"ts":"1732382838236"} 2024-11-23T17:27:18,236 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732382838236"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382838236"}]},"ts":"1732382838236"} 2024-11-23T17:27:18,239 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:27:18,239 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:27:18,240 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382838239"}]},"ts":"1732382838239"} 2024-11-23T17:27:18,241 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-23T17:27:18,241 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:27:18,242 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:27:18,242 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:27:18,242 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:27:18,242 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:27:18,242 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:27:18,243 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:27:18,243 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:27:18,243 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:27:18,243 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:27:18,243 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:27:18,243 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=efe67ed8d86346ec0aee1231aa1ea894, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=da60fa86fdbf8f39601828babeb53202, ASSIGN}] 2024-11-23T17:27:18,244 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=efe67ed8d86346ec0aee1231aa1ea894, ASSIGN 2024-11-23T17:27:18,244 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=da60fa86fdbf8f39601828babeb53202, ASSIGN 2024-11-23T17:27:18,245 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=efe67ed8d86346ec0aee1231aa1ea894, ASSIGN; state=OFFLINE, location=215bb12e999f,41065,1732382621000; forceNewPlan=false, retain=false 2024-11-23T17:27:18,245 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=da60fa86fdbf8f39601828babeb53202, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:27:18,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T17:27:18,395 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
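The BalancerClusterState lines above describe the placement input (one host, one rack, three servers, each currently holding zero regions of this table), after which BaseLoadBalancer reports the two new regions assigned across the cluster. Purely as an illustration of that kind of rotation, and not the actual balancer code, a trivial round-robin assignment might look like this (region and server names copied from the log):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinAssignSketch {
  /** Assign each region to a server in rotation, in the spirit of the balancer output above. */
  static Map<String, String> assign(List<String> regions, List<String> servers) {
    Map<String, String> plan = new HashMap<>();
    int i = 0;
    for (String region : regions) {
      plan.put(region, servers.get(i % servers.size()));
      i++;
    }
    return plan;
  }

  public static void main(String[] args) {
    List<String> regions = List.of("efe67ed8d86346ec0aee1231aa1ea894",
        "da60fa86fdbf8f39601828babeb53202");
    List<String> servers = List.of("215bb12e999f,41065", "215bb12e999f,44667",
        "215bb12e999f,45467");
    System.out.println(assign(regions, servers));
  }
}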
2024-11-23T17:27:18,396 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=efe67ed8d86346ec0aee1231aa1ea894, regionState=OPENING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:27:18,396 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=da60fa86fdbf8f39601828babeb53202, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:27:18,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=da60fa86fdbf8f39601828babeb53202, ASSIGN because future has completed 2024-11-23T17:27:18,400 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure da60fa86fdbf8f39601828babeb53202, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:27:18,402 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=efe67ed8d86346ec0aee1231aa1ea894, ASSIGN because future has completed 2024-11-23T17:27:18,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure efe67ed8d86346ec0aee1231aa1ea894, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:27:18,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T17:27:18,557 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:18,557 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => da60fa86fdbf8f39601828babeb53202, NAME => 'testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:27:18,557 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. service=AccessControlService 2024-11-23T17:27:18,557 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 2024-11-23T17:27:18,557 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
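The RegionStateStore entries here record each assignment step as a Put into hbase:meta (family info, qualifiers such as regioninfo and state, with regionState moving to OPENING). A small sketch of reading those rows back through the ordinary client API follows; the row-key prefix convention and the readable info:state value are assumptions based on what the log prints, not guarantees about the meta schema.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DumpMetaRegionStates {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Region rows appear to be keyed as "<table>,<startkey>,<regionid>.<encoded>.",
      // so a row-prefix scan on the table name plus ',' should find them.
      Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("testtb-testExportWithResetTtl,"));
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result r : scanner) {
          byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
          System.out.println(Bytes.toString(r.getRow()) + " -> "
              + (state == null ? "unknown" : Bytes.toString(state)));
        }
      }
    }
  }
}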
2024-11-23T17:27:18,557 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => efe67ed8d86346ec0aee1231aa1ea894, NAME => 'testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:27:18,558 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,558 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. service=AccessControlService 2024-11-23T17:27:18,558 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:18,558 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,558 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:27:18,558 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,558 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,558 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:18,558 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,558 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,562 INFO [StoreOpener-da60fa86fdbf8f39601828babeb53202-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,563 INFO [StoreOpener-efe67ed8d86346ec0aee1231aa1ea894-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,564 INFO [StoreOpener-da60fa86fdbf8f39601828babeb53202-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region da60fa86fdbf8f39601828babeb53202 columnFamilyName cf 2024-11-23T17:27:18,564 DEBUG [StoreOpener-da60fa86fdbf8f39601828babeb53202-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:27:18,565 INFO [StoreOpener-da60fa86fdbf8f39601828babeb53202-1 {}] regionserver.HStore(327): Store=da60fa86fdbf8f39601828babeb53202/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:27:18,565 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,566 INFO [StoreOpener-efe67ed8d86346ec0aee1231aa1ea894-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region efe67ed8d86346ec0aee1231aa1ea894 columnFamilyName cf 2024-11-23T17:27:18,566 DEBUG [StoreOpener-efe67ed8d86346ec0aee1231aa1ea894-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:27:18,566 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,567 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,567 INFO [StoreOpener-efe67ed8d86346ec0aee1231aa1ea894-1 {}] regionserver.HStore(327): Store=efe67ed8d86346ec0aee1231aa1ea894/cf, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:27:18,567 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,567 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,567 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,568 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,568 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,569 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,569 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,569 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,571 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:18,577 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:27:18,577 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:27:18,578 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened efe67ed8d86346ec0aee1231aa1ea894; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74753747, jitterRate=0.11391763389110565}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:27:18,578 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for efe67ed8d86346ec0aee1231aa1ea894 
2024-11-23T17:27:18,578 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened da60fa86fdbf8f39601828babeb53202; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70345565, jitterRate=0.0482306033372879}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:27:18,578 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:18,578 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for efe67ed8d86346ec0aee1231aa1ea894: Running coprocessor pre-open hook at 1732382838559Writing region info on filesystem at 1732382838559Initializing all the Stores at 1732382838559Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382838559Cleaning up temporary data from old regions at 1732382838569 (+10 ms)Running coprocessor post-open hooks at 1732382838578 (+9 ms)Region opened successfully at 1732382838578 2024-11-23T17:27:18,578 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for da60fa86fdbf8f39601828babeb53202: Running coprocessor pre-open hook at 1732382838558Writing region info on filesystem at 1732382838558Initializing all the Stores at 1732382838559 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382838559Cleaning up temporary data from old regions at 1732382838567 (+8 ms)Running coprocessor post-open hooks at 1732382838578 (+11 ms)Region opened successfully at 1732382838578 2024-11-23T17:27:18,580 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894., pid=67, masterSystemTime=1732382838554 2024-11-23T17:27:18,580 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202., pid=66, masterSystemTime=1732382838553 2024-11-23T17:27:18,582 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 2024-11-23T17:27:18,582 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 
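The region-open journal above records the schema the two test regions carry: a single column family 'cf' with VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB blocks, and one split point at row '1' (the STARTKEY/ENDKEY values in the open messages). The snippet below is only an illustrative sketch of creating an equivalent table through the public Admin API; the test itself drives table creation through HBaseTestingUtil, and the class name here is invented for the example.

// Illustrative sketch only: approximates the schema shown in the region-open journal
// (family 'cf', one version, ROW bloom filter, 64 KB blocks, split at row '1').
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
                  .build());
      // A single split point at '1' yields the two regions seen above:
      // [,1) -> efe67ed8d86346ec0aee1231aa1ea894 and [1,) -> da60fa86fdbf8f39601828babeb53202
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}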
2024-11-23T17:27:18,582 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=efe67ed8d86346ec0aee1231aa1ea894, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:27:18,583 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:18,583 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:18,583 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=da60fa86fdbf8f39601828babeb53202, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:27:18,585 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure efe67ed8d86346ec0aee1231aa1ea894, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:27:18,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure da60fa86fdbf8f39601828babeb53202, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:27:18,588 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=64 2024-11-23T17:27:18,588 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure efe67ed8d86346ec0aee1231aa1ea894, server=215bb12e999f,41065,1732382621000 in 184 msec 2024-11-23T17:27:18,590 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=65 2024-11-23T17:27:18,590 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=efe67ed8d86346ec0aee1231aa1ea894, ASSIGN in 346 msec 2024-11-23T17:27:18,590 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure da60fa86fdbf8f39601828babeb53202, server=215bb12e999f,44667,1732382620918 in 188 msec 2024-11-23T17:27:18,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-11-23T17:27:18,592 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=da60fa86fdbf8f39601828babeb53202, ASSIGN in 347 msec 2024-11-23T17:27:18,593 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:27:18,593 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382838593"}]},"ts":"1732382838593"} 2024-11-23T17:27:18,594 INFO [PEWorker-2 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-23T17:27:18,595 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:27:18,595 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-23T17:27:18,598 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:27:18,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:18,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:18,809 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:18,809 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T17:27:19,010 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:19,010 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:19,010 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:19,010 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:19,012 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 799 msec 2024-11-23T17:27:19,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-23T17:27:19,347 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): 
Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:27:19,347 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-23T17:27:19,347 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:27:19,352 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-23T17:27:19,352 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:27:19,352 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 2024-11-23T17:27:19,352 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:19,356 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T17:27:19,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382839356 (current time:1732382839356). 2024-11-23T17:27:19,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:27:19,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-23T17:27:19,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:27:19,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5dafded8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:19,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:19,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:19,358 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:19,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:19,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:19,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@586a3084, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:19,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:19,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:19,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:19,360 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:19,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ecefb97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:19,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:19,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:19,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:19,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56222, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:19,365 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 
2024-11-23T17:27:19,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:19,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:19,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:19,365 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:27:19,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c46eeb1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:19,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:19,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:19,367 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:19,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:19,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:19,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0dab00, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:19,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:19,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:19,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:19,368 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42974, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:19,369 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d91ef6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:19,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:19,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:19,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:19,372 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56228, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:19,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:19,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:19,376 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44320, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:19,377 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 
2024-11-23T17:27:19,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:19,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:19,378 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:27:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:27:19,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
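The request logged at 17:27:19,356 ({ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }) is an ordinary flush snapshot taken while the table is still empty. A hedged sketch of issuing the same request through the Admin API follows; the class name is invented and this is not the test's own code. On the master side the call turns into the SnapshotProcedure (pid=68) shown below.

// Hedged sketch, assuming a reachable cluster configured in HBaseConfiguration.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the master's log;
      // the call blocks until the snapshot procedure completes.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}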
2024-11-23T17:27:19,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T17:27:19,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-23T17:27:19,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T17:27:19,382 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:27:19,384 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:27:19,387 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:27:19,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741961_1137 (size=161) 2024-11-23T17:27:19,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741961_1137 (size=161) 2024-11-23T17:27:19,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741961_1137 (size=161) 2024-11-23T17:27:19,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T17:27:19,659 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0002/container_1732382632246_0002_01_000002/launch_container.sh] 2024-11-23T17:27:19,659 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0002/container_1732382632246_0002_01_000002/container_tokens] 2024-11-23T17:27:19,659 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0002/container_1732382632246_0002_01_000002/sysfs] 2024-11-23T17:27:19,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T17:27:19,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-23T17:27:19,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-23T17:27:19,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-23T17:27:19,815 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:27:19,815 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe67ed8d86346ec0aee1231aa1ea894}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da60fa86fdbf8f39601828babeb53202}] 2024-11-23T17:27:19,816 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:19,816 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:19,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-11-23T17:27:19,968 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-11-23T17:27:19,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:19,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 
2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for da60fa86fdbf8f39601828babeb53202: 2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for efe67ed8d86346ec0aee1231aa1ea894: 2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:19,969 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:27:19,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741963_1139 (size=68) 2024-11-23T17:27:19,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741963_1139 (size=68) 2024-11-23T17:27:19,984 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 
2024-11-23T17:27:19,984 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-11-23T17:27:19,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-11-23T17:27:19,985 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:19,985 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:19,988 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure efe67ed8d86346ec0aee1231aa1ea894 in 171 msec 2024-11-23T17:27:19,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741962_1138 (size=68) 2024-11-23T17:27:19,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741962_1138 (size=68) 2024-11-23T17:27:19,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741963_1139 (size=68) 2024-11-23T17:27:19,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741962_1138 (size=68) 2024-11-23T17:27:20,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T17:27:20,029 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-23T17:27:20,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 
2024-11-23T17:27:20,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-23T17:27:20,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-11-23T17:27:20,391 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:20,391 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:20,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-11-23T17:27:20,398 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure da60fa86fdbf8f39601828babeb53202 in 577 msec 2024-11-23T17:27:20,398 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:27:20,399 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:27:20,399 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:27:20,399 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-23T17:27:20,400 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-23T17:27:20,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741964_1140 (size=543) 2024-11-23T17:27:20,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741964_1140 (size=543) 2024-11-23T17:27:20,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741964_1140 (size=543) 2024-11-23T17:27:20,421 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:27:20,428 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:27:20,428 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-23T17:27:20,430 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:27:20,430 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-23T17:27:20,432 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 1.0510 sec 2024-11-23T17:27:20,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-23T17:27:20,516 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:27:20,523 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0af7ab56a809013b8ba270c06f524b709', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:20,525 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='1fe6f0e17e47baf758a72c3ec1256411a', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:27:20,526 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='2d6ddd01415e0c4a78fe0bd4c352cd7f0', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:27:20,527 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='37d84b4443eedb0d8b7f961aeb82ea9e2', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:27:20,528 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='5b6f95c1261a51e774390229ef7ab7006', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:27:20,530 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='414ec5329f41158bd854550cdca18dc12', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:27:20,531 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='637a0db1f3b581e5a94fa34d83ff9c2c3', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:27:20,533 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41065 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:27:20,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:27:20,539 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:20,542 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-23T17:27:20,543 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 2024-11-23T17:27:20,543 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:27:20,545 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:20,552 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:20,560 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:20,564 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T17:27:20,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382840564 (current time:1732382840564). 
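The "writing data to region ... with WAL disabled" warnings at 17:27:20,533/20,537 show the test loading rows into family 'cf' with SKIP_WAL durability before requesting the second snapshot, snaptb0-testExportWithResetTtl (whose validation continues below). A rough, hedged equivalent using the client API is sketched here; the row keys, qualifier, and values are placeholders, not the test's actual data, and the class name is invented.

// Hedged sketch: load placeholder rows without WAL, then take the data-bearing snapshot.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadAndSnapshotSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // Placeholder rows; SKIP_WAL reproduces the "WAL disabled" warning in the log above.
      for (int i = 0; i < 50; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row-%03d", i)));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        put.setDurability(Durability.SKIP_WAL);
        table.put(put);
      }
      // Second, data-bearing snapshot, as requested at 17:27:20,564.
      admin.snapshot("snaptb0-testExportWithResetTtl", tn, SnapshotType.FLUSH);
    }
  }
}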
2024-11-23T17:27:20,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:27:20,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-23T17:27:20,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:27:20,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@164f65d5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:20,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:20,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:20,566 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:20,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:20,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:20,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d34a83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:20,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:20,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:20,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:20,568 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42996, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:20,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@338bf1f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:20,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:20,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:20,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:20,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:20,572 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:27:20,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:20,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:20,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:20,573 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:27:20,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@630a44c8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:20,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:20,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:20,575 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:20,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:20,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:20,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe00d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:20,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:20,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:20,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:20,577 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43010, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:20,578 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5301d678, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:20,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:20,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:20,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:20,580 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56240, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:27:20,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:20,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:20,584 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44332, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:20,585 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:27:20,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:20,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:20,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:20,585 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:27:20,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:27:20,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:27:20,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-23T17:27:20,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-11-23T17:27:20,589 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:27:20,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T17:27:20,590 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:27:20,593 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:27:20,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741965_1141 (size=156) 2024-11-23T17:27:20,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741965_1141 (size=156) 2024-11-23T17:27:20,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741965_1141 (size=156) 2024-11-23T17:27:20,601 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:27:20,602 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe67ed8d86346ec0aee1231aa1ea894}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da60fa86fdbf8f39601828babeb53202}] 2024-11-23T17:27:20,603 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:20,603 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:20,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T17:27:20,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-11-23T17:27:20,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-11-23T17:27:20,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:20,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 
2024-11-23T17:27:20,755 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing efe67ed8d86346ec0aee1231aa1ea894 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-23T17:27:20,755 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing da60fa86fdbf8f39601828babeb53202 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-23T17:27:20,775 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/.tmp/cf/b0a2567cb00d44208980210d938dd942 is 71, key is 00ea773992274892754d819dd1cd1e5f/cf:q/1732382840532/Put/seqid=0 2024-11-23T17:27:20,781 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/.tmp/cf/5f9c791b0e484b74802379211e6cc15f is 71, key is 1170b6e56a127298b8a677c8cc1589d0/cf:q/1732382840537/Put/seqid=0 2024-11-23T17:27:20,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741966_1142 (size=5424) 2024-11-23T17:27:20,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741966_1142 (size=5424) 2024-11-23T17:27:20,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741966_1142 (size=5424) 2024-11-23T17:27:20,806 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/.tmp/cf/b0a2567cb00d44208980210d938dd942 2024-11-23T17:27:20,815 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/.tmp/cf/b0a2567cb00d44208980210d938dd942 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/cf/b0a2567cb00d44208980210d938dd942 2024-11-23T17:27:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741967_1143 (size=8188) 2024-11-23T17:27:20,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741967_1143 (size=8188) 2024-11-23T17:27:20,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741967_1143 (size=8188) 2024-11-23T17:27:20,819 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/.tmp/cf/5f9c791b0e484b74802379211e6cc15f 2024-11-23T17:27:20,824 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/cf/b0a2567cb00d44208980210d938dd942, entries=5, sequenceid=6, filesize=5.3 K 2024-11-23T17:27:20,825 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for efe67ed8d86346ec0aee1231aa1ea894 in 70ms, sequenceid=6, compaction requested=false 2024-11-23T17:27:20,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for efe67ed8d86346ec0aee1231aa1ea894: 2024-11-23T17:27:20,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. for snaptb0-testExportWithResetTtl completed. 2024-11-23T17:27:20,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T17:27:20,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:20,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/cf/b0a2567cb00d44208980210d938dd942] hfiles 2024-11-23T17:27:20,825 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/cf/b0a2567cb00d44208980210d938dd942 for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T17:27:20,831 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/.tmp/cf/5f9c791b0e484b74802379211e6cc15f as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/cf/5f9c791b0e484b74802379211e6cc15f 2024-11-23T17:27:20,838 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/cf/5f9c791b0e484b74802379211e6cc15f, entries=45, sequenceid=6, filesize=8.0 K 2024-11-23T17:27:20,839 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for da60fa86fdbf8f39601828babeb53202 in 84ms, sequenceid=6, compaction requested=false 2024-11-23T17:27:20,839 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for da60fa86fdbf8f39601828babeb53202: 2024-11-23T17:27:20,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. for snaptb0-testExportWithResetTtl completed. 2024-11-23T17:27:20,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T17:27:20,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:20,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/cf/5f9c791b0e484b74802379211e6cc15f] hfiles 2024-11-23T17:27:20,840 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/cf/5f9c791b0e484b74802379211e6cc15f for snapshot=snaptb0-testExportWithResetTtl 2024-11-23T17:27:20,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741969_1145 (size=107) 2024-11-23T17:27:20,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741969_1145 (size=107) 2024-11-23T17:27:20,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741969_1145 (size=107) 2024-11-23T17:27:20,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 
2024-11-23T17:27:20,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-11-23T17:27:20,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-11-23T17:27:20,851 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:20,851 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:20,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure da60fa86fdbf8f39601828babeb53202 in 255 msec 2024-11-23T17:27:20,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741968_1144 (size=107) 2024-11-23T17:27:20,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741968_1144 (size=107) 2024-11-23T17:27:20,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741968_1144 (size=107) 2024-11-23T17:27:20,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T17:27:21,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T17:27:21,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 
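[Editor's note] The repeated "Checking to see if procedure is done pid=71" entries are the master answering the client's completion polls for snapshot procedure 71. A hedged sketch of the equivalent asynchronous client flow follows; it assumes the Admin.snapshotAsync/isSnapshotFinished API and is not taken from the test source (the class name SnapshotPollSketch and the 200 ms poll interval are illustrative).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

public class SnapshotPollSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      SnapshotDescription snap = new SnapshotDescription(
          "snaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
      admin.snapshotAsync(snap);                // returns before the procedure completes
      while (!admin.isSnapshotFinished(snap)) { // client-side polling mirrored by the
        Thread.sleep(200);                      // "procedure is done pid=71" checks above
      }
    }
  }
}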
2024-11-23T17:27:21,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-23T17:27:21,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-11-23T17:27:21,267 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:21,267 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:21,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=72, resume processing ppid=71 2024-11-23T17:27:21,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure efe67ed8d86346ec0aee1231aa1ea894 in 667 msec 2024-11-23T17:27:21,271 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:27:21,273 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:27:21,274 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:27:21,274 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-23T17:27:21,275 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-23T17:27:21,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741970_1146 (size=621) 2024-11-23T17:27:21,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741970_1146 (size=621) 2024-11-23T17:27:21,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741970_1146 (size=621) 2024-11-23T17:27:21,377 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0002_000001 (auth:SIMPLE) from 127.0.0.1:44706 2024-11-23T17:27:21,382 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ 
ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:27:21,394 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_1/usercache/jenkins/appcache/application_1732382632246_0002/container_1732382632246_0002_01_000001/launch_container.sh] 2024-11-23T17:27:21,394 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_1/usercache/jenkins/appcache/application_1732382632246_0002/container_1732382632246_0002_01_000001/container_tokens] 2024-11-23T17:27:21,394 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_1/usercache/jenkins/appcache/application_1732382632246_0002/container_1732382632246_0002_01_000001/sysfs] 2024-11-23T17:27:21,404 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:27:21,405 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-23T17:27:21,409 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:27:21,409 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-11-23T17:27:21,412 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 823 msec 2024-11-23T17:27:21,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-23T17:27:21,727 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:27:21,728 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 
'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:27:21,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-23T17:27:21,730 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:27:21,730 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:27:21,730 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-11-23T17:27:21,731 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:27:21,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T17:27:21,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741971_1147 (size=397) 2024-11-23T17:27:21,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741971_1147 (size=397) 2024-11-23T17:27:21,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741971_1147 (size=397) 2024-11-23T17:27:21,741 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0edf1213fb703278024472158f703420, NAME => 'testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:21,742 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 77c6a4d198354393f3bb535244e96b75, NAME => 'testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:21,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741973_1149 (size=58) 2024-11-23T17:27:21,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741973_1149 (size=58) 2024-11-23T17:27:21,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741973_1149 (size=58) 2024-11-23T17:27:21,751 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:21,751 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 77c6a4d198354393f3bb535244e96b75, disabling compactions & flushes 2024-11-23T17:27:21,751 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 2024-11-23T17:27:21,751 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 2024-11-23T17:27:21,751 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. after waiting 0 ms 2024-11-23T17:27:21,751 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 2024-11-23T17:27:21,751 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 
2024-11-23T17:27:21,751 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 77c6a4d198354393f3bb535244e96b75: Waiting for close lock at 1732382841751Disabling compacts and flushes for region at 1732382841751Disabling writes for close at 1732382841751Writing region close event to WAL at 1732382841751Closed at 1732382841751 2024-11-23T17:27:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741972_1148 (size=58) 2024-11-23T17:27:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741972_1148 (size=58) 2024-11-23T17:27:21,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741972_1148 (size=58) 2024-11-23T17:27:21,755 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:21,755 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 0edf1213fb703278024472158f703420, disabling compactions & flushes 2024-11-23T17:27:21,755 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:21,755 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:21,755 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. after waiting 0 ms 2024-11-23T17:27:21,755 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:21,755 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 
2024-11-23T17:27:21,755 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0edf1213fb703278024472158f703420: Waiting for close lock at 1732382841755Disabling compacts and flushes for region at 1732382841755Disabling writes for close at 1732382841755Writing region close event to WAL at 1732382841755Closed at 1732382841755 2024-11-23T17:27:21,756 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:27:21,757 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732382841756"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382841756"}]},"ts":"1732382841756"} 2024-11-23T17:27:21,757 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732382841756"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382841756"}]},"ts":"1732382841756"} 2024-11-23T17:27:21,759 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:27:21,759 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:27:21,760 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382841760"}]},"ts":"1732382841760"} 2024-11-23T17:27:21,761 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-23T17:27:21,761 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:27:21,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:27:21,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:27:21,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:27:21,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:27:21,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:27:21,762 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:27:21,762 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:27:21,763 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:27:21,763 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:27:21,763 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:27:21,763 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, 
region=0edf1213fb703278024472158f703420, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=77c6a4d198354393f3bb535244e96b75, ASSIGN}] 2024-11-23T17:27:21,764 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=77c6a4d198354393f3bb535244e96b75, ASSIGN 2024-11-23T17:27:21,764 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0edf1213fb703278024472158f703420, ASSIGN 2024-11-23T17:27:21,765 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=77c6a4d198354393f3bb535244e96b75, ASSIGN; state=OFFLINE, location=215bb12e999f,41065,1732382621000; forceNewPlan=false, retain=false 2024-11-23T17:27:21,765 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=0edf1213fb703278024472158f703420, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:27:21,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T17:27:21,915 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
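[Editor's note] The preceding entries record the master creating table 'testExportWithResetTtl' with a single column family 'cf' and assigning two regions split at row '1'. A minimal sketch of the client call that would drive this is shown below, assuming the TableDescriptorBuilder/ColumnFamilyDescriptorBuilder API; the class name CreateTableSketch is illustrative and the column-family attributes logged above are the defaults, so they are not set explicitly here.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportWithResetTtl"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One split key ('1') yields the two regions assigned above:
      // STARTKEY '' -> ENDKEY '1' and STARTKEY '1' -> ENDKEY ''.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}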
2024-11-23T17:27:21,916 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=77c6a4d198354393f3bb535244e96b75, regionState=OPENING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:27:21,916 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=0edf1213fb703278024472158f703420, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:27:21,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=0edf1213fb703278024472158f703420, ASSIGN because future has completed 2024-11-23T17:27:21,918 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0edf1213fb703278024472158f703420, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:27:21,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=77c6a4d198354393f3bb535244e96b75, ASSIGN because future has completed 2024-11-23T17:27:21,918 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 77c6a4d198354393f3bb535244e96b75, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:27:22,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T17:27:22,074 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 2024-11-23T17:27:22,074 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 77c6a4d198354393f3bb535244e96b75, NAME => 'testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:27:22,075 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. service=AccessControlService 2024-11-23T17:27:22,075 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:27:22,075 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,076 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:22,076 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,076 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,078 INFO [StoreOpener-77c6a4d198354393f3bb535244e96b75-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,079 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:22,080 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 0edf1213fb703278024472158f703420, NAME => 'testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:27:22,080 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. service=AccessControlService 2024-11-23T17:27:22,080 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:27:22,080 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,080 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:22,080 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,080 INFO [StoreOpener-77c6a4d198354393f3bb535244e96b75-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77c6a4d198354393f3bb535244e96b75 columnFamilyName cf 2024-11-23T17:27:22,080 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,081 DEBUG [StoreOpener-77c6a4d198354393f3bb535244e96b75-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:27:22,081 INFO [StoreOpener-77c6a4d198354393f3bb535244e96b75-1 {}] regionserver.HStore(327): Store=77c6a4d198354393f3bb535244e96b75/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:27:22,081 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,082 INFO [StoreOpener-0edf1213fb703278024472158f703420-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,082 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,082 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,083 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,083 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,084 INFO [StoreOpener-0edf1213fb703278024472158f703420-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0edf1213fb703278024472158f703420 columnFamilyName cf 2024-11-23T17:27:22,084 DEBUG [StoreOpener-0edf1213fb703278024472158f703420-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:27:22,084 INFO [StoreOpener-0edf1213fb703278024472158f703420-1 {}] regionserver.HStore(327): Store=0edf1213fb703278024472158f703420/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:27:22,084 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,085 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,085 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420 2024-11-23T17:27:22,085 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420 2024-11-23T17:27:22,086 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,086 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,090 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 0edf1213fb703278024472158f703420 
2024-11-23T17:27:22,123 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:27:22,124 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 0edf1213fb703278024472158f703420; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70976452, jitterRate=0.05763155221939087}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:27:22,124 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,125 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 0edf1213fb703278024472158f703420: Running coprocessor pre-open hook at 1732382842081Writing region info on filesystem at 1732382842081Initializing all the Stores at 1732382842082 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382842082Cleaning up temporary data from old regions at 1732382842086 (+4 ms)Running coprocessor post-open hooks at 1732382842124 (+38 ms)Region opened successfully at 1732382842125 (+1 ms) 2024-11-23T17:27:22,127 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420., pid=77, masterSystemTime=1732382842069 2024-11-23T17:27:22,132 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:22,132 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:22,133 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=0edf1213fb703278024472158f703420, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:27:22,136 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:27:22,137 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=215bb12e999f,44667,1732382620918, table=testExportWithResetTtl, region=0edf1213fb703278024472158f703420. 
It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-23T17:27:22,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0edf1213fb703278024472158f703420, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:27:22,138 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 77c6a4d198354393f3bb535244e96b75; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72382682, jitterRate=0.07858601212501526}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:27:22,138 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,138 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 77c6a4d198354393f3bb535244e96b75: Running coprocessor pre-open hook at 1732382842076Writing region info on filesystem at 1732382842076Initializing all the Stores at 1732382842077 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382842077Cleaning up temporary data from old regions at 1732382842083 (+6 ms)Running coprocessor post-open hooks at 1732382842138 (+55 ms)Region opened successfully at 1732382842138 2024-11-23T17:27:22,141 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75., pid=78, masterSystemTime=1732382842070 2024-11-23T17:27:22,149 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 2024-11-23T17:27:22,150 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 
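The two region opens above are the tail end of creating the pre-split test table: a single split point at '1' yields the regions 0edf1213fb703278024472158f703420 ['', '1') and 77c6a4d198354393f3bb535244e96b75 ['1', ''). As a rough sketch only, an equivalent request could be issued through the public Admin API as below; the test itself goes through HBaseTestingUtil helpers, and the class and variable names here are illustrative, not taken from the test source.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      // One column family 'cf' plus a single split key "1" produces the two
      // regions whose opens are logged above: ['', '1') and ['1', '').
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testExportWithResetTtl"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build(),
          new byte[][] { Bytes.toBytes("1") });
    }
  }
}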
2024-11-23T17:27:22,152 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=77c6a4d198354393f3bb535244e96b75, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:27:22,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=75 2024-11-23T17:27:22,155 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 0edf1213fb703278024472158f703420, server=215bb12e999f,44667,1732382620918 in 225 msec 2024-11-23T17:27:22,156 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0edf1213fb703278024472158f703420, ASSIGN in 391 msec 2024-11-23T17:27:22,157 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 77c6a4d198354393f3bb535244e96b75, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:27:22,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=76 2024-11-23T17:27:22,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 77c6a4d198354393f3bb535244e96b75, server=215bb12e999f,41065,1732382621000 in 240 msec 2024-11-23T17:27:22,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-11-23T17:27:22,162 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=77c6a4d198354393f3bb535244e96b75, ASSIGN in 397 msec 2024-11-23T17:27:22,163 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:27:22,163 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382842163"}]},"ts":"1732382842163"} 2024-11-23T17:27:22,165 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-23T17:27:22,166 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:27:22,166 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-23T17:27:22,172 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:27:22,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:22,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, 
quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:22,232 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:22,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:22,272 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:22,272 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:22,272 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:22,272 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:22,272 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:22,273 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:22,273 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:22,273 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:22,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 543 msec 2024-11-23T17:27:22,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-23T17:27:22,357 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-23T17:27:22,357 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-23T17:27:22,357 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:27:22,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-23T17:27:22,362 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:27:22,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-11-23T17:27:22,362 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:22,376 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='0bd0654a6d2e09fcf569ce463ee2871a1', locateType=CURRENT is [region=testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:27:22,379 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='1a20078d7addbe2e3d4741a8d475695a', locateType=CURRENT is [region=testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:22,380 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='1ca189b1b4f5665bb539167958d123fc3', locateType=CURRENT is [region=testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:22,381 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='26cba363858f8befb3878c89833874dfe', locateType=CURRENT is [region=testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:22,381 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='3ced135a09533154c550476ec18f79c12', locateType=CURRENT is [region=testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:22,382 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='49e509e58ee28dd9e160571033f9e62ba', locateType=CURRENT is [region=testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:22,383 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='5c66e53d0f96022c971095d701fb6df3a', locateType=CURRENT is [region=testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:22,384 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', 
row='605714b8e0486c8e6af9e1d7515559be0', locateType=CURRENT is [region=testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:22,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:27:22,385 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='5d6946d3458d68c9ae42eb2f7f8a91b2', locateType=CURRENT is [region=testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:22,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41065 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:27:22,388 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:22,391 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-23T17:27:22,391 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:22,391 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:27:22,393 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:22,398 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:22,404 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-23T17:27:22,406 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-23T17:27:22,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382842406 (current time:1732382842406). 
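The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" messages above come from puts issued with write-ahead logging skipped. A minimal sketch of such a write is below; the table and column family names are taken from the log, while the row key, qualifier value, and class name are hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));  // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the write-ahead log for this mutation; the region server then emits
      // the "Data may be lost in the event of a crash" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}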
2024-11-23T17:27:22,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-23T17:27:22,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:27:22,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7373d3f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:22,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:22,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:22,408 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:22,408 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:22,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:22,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bb653ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:22,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:22,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:22,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:22,410 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45844, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:22,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f6c2102, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:22,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:22,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:22,411 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:22,412 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33610, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:22,414 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:27:22,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:22,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:22,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:22,414 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:27:22,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c94f000, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:22,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:22,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:22,421 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:22,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:22,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:22,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fd873e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:22,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:22,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:22,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:22,422 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:22,423 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65732fba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:22,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:22,424 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:22,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:22,425 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33614, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:27:22,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:22,427 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:22,428 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53912, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:22,429 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:27:22,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:22,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:22,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:22,430 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:27:22,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-23T17:27:22,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:27:22,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-23T17:27:22,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-23T17:27:22,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T17:27:22,433 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:27:22,434 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:27:22,437 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:27:22,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741974_1150 (size=143) 2024-11-23T17:27:22,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741974_1150 (size=143) 2024-11-23T17:27:22,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741974_1150 (size=143) 2024-11-23T17:27:22,444 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
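The snapshot request logged above ({ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }) corresponds to a client-side snapshot call that carries a TTL property. A hedged sketch follows, assuming the SnapshotDescription constructor that accepts a snapshot-properties map (with the "TTL" key) as documented for TTL-bearing snapshots; the exact constructor arity and property units should be checked against the client API in use.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotWithTtlSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
         Admin admin = conn.getAdmin()) {
      Map<String, Object> props = new HashMap<>();
      props.put("TTL", 100000L);  // mirrors the ttl=100000 in the request above
      // FLUSH-type snapshot: regions flush their memstores before being snapshotted,
      // which is the flush activity visible further down in this log.
      admin.snapshot(new SnapshotDescription(
          "snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"),
          SnapshotType.FLUSH,
          null,   // owner
          -1,     // creation time (let the master assign it, as logged above)
          -1,     // version
          props));
    }
  }
}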
2024-11-23T17:27:22,444 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0edf1213fb703278024472158f703420}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77c6a4d198354393f3bb535244e96b75}] 2024-11-23T17:27:22,445 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,445 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T17:27:22,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-11-23T17:27:22,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-11-23T17:27:22,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 2024-11-23T17:27:22,597 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 
2024-11-23T17:27:22,597 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 0edf1213fb703278024472158f703420 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-23T17:27:22,597 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 77c6a4d198354393f3bb535244e96b75 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-23T17:27:22,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/.tmp/cf/87275cfdd27e41ee93ab5c53a066587b is 71, key is 10dd67d9efc8d6e2007c569af2d3aed4/cf:q/1732382842386/Put/seqid=0 2024-11-23T17:27:22,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/.tmp/cf/ca0413d74dfc4f30af3e8825af96652b is 69, key is 0bd0654a6d2e09fcf569ce463ee2871a1/cf:q/1732382842384/Put/seqid=0 2024-11-23T17:27:22,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741975_1151 (size=8460) 2024-11-23T17:27:22,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741975_1151 (size=8460) 2024-11-23T17:27:22,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741975_1151 (size=8460) 2024-11-23T17:27:22,628 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/.tmp/cf/87275cfdd27e41ee93ab5c53a066587b 2024-11-23T17:27:22,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/.tmp/cf/87275cfdd27e41ee93ab5c53a066587b as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/cf/87275cfdd27e41ee93ab5c53a066587b 2024-11-23T17:27:22,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741976_1152 (size=5149) 2024-11-23T17:27:22,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741976_1152 (size=5149) 2024-11-23T17:27:22,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741976_1152 (size=5149) 2024-11-23T17:27:22,640 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/.tmp/cf/ca0413d74dfc4f30af3e8825af96652b 2024-11-23T17:27:22,646 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/cf/87275cfdd27e41ee93ab5c53a066587b, entries=49, sequenceid=5, filesize=8.3 K 2024-11-23T17:27:22,647 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 77c6a4d198354393f3bb535244e96b75 in 50ms, sequenceid=5, compaction requested=false 2024-11-23T17:27:22,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-23T17:27:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 77c6a4d198354393f3bb535244e96b75: 2024-11-23T17:27:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. for snaptb-testExportWithResetTtl completed. 2024-11-23T17:27:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-23T17:27:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/cf/87275cfdd27e41ee93ab5c53a066587b] hfiles 2024-11-23T17:27:22,648 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/cf/87275cfdd27e41ee93ab5c53a066587b for snapshot=snaptb-testExportWithResetTtl 2024-11-23T17:27:22,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/.tmp/cf/ca0413d74dfc4f30af3e8825af96652b as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/cf/ca0413d74dfc4f30af3e8825af96652b 2024-11-23T17:27:22,658 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/cf/ca0413d74dfc4f30af3e8825af96652b, entries=1, sequenceid=5, filesize=5.0 K 2024-11-23T17:27:22,659 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 0edf1213fb703278024472158f703420 in 62ms, sequenceid=5, compaction requested=false 2024-11-23T17:27:22,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 0edf1213fb703278024472158f703420: 2024-11-23T17:27:22,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. for snaptb-testExportWithResetTtl completed. 2024-11-23T17:27:22,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-23T17:27:22,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:22,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/cf/ca0413d74dfc4f30af3e8825af96652b] hfiles 2024-11-23T17:27:22,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/cf/ca0413d74dfc4f30af3e8825af96652b for snapshot=snaptb-testExportWithResetTtl 2024-11-23T17:27:22,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741977_1153 (size=100) 2024-11-23T17:27:22,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741977_1153 (size=100) 2024-11-23T17:27:22,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741977_1153 (size=100) 2024-11-23T17:27:22,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 
2024-11-23T17:27:22,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-11-23T17:27:22,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-11-23T17:27:22,662 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,662 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:22,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 77c6a4d198354393f3bb535244e96b75 in 220 msec 2024-11-23T17:27:22,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741978_1154 (size=100) 2024-11-23T17:27:22,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741978_1154 (size=100) 2024-11-23T17:27:22,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741978_1154 (size=100) 2024-11-23T17:27:22,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 
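Once the SnapshotProcedure below reaches SUCCESS, the test drives org.apache.hadoop.hbase.snapshot.ExportSnapshot against the new snapshot (the export-test/export-1732382843068 destination that appears further down). A rough sketch of invoking the same tool programmatically is shown here; the option spellings, in particular the reset-TTL flag that this test exercises, are assumptions and should be verified against the tool's usage text rather than read as the test's exact arguments.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy the snapshot manifest and referenced hfiles to another HDFS location.
    // The -copy-to path mirrors the export destination seen later in this log;
    // "-reset-ttl" (dropping the snapshot TTL on the exported copy) is assumed
    // from the feature under test, not taken verbatim from the log.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to",
        "hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068",
        "-reset-ttl"
    });
    System.exit(rc);
  }
}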
2024-11-23T17:27:22,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-23T17:27:22,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-11-23T17:27:22,680 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,680 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0edf1213fb703278024472158f703420 2024-11-23T17:27:22,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=79 2024-11-23T17:27:22,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0edf1213fb703278024472158f703420 in 237 msec 2024-11-23T17:27:22,684 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:27:22,685 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:27:22,686 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:27:22,686 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-23T17:27:22,687 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-23T17:27:22,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741979_1155 (size=600) 2024-11-23T17:27:22,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741979_1155 (size=600) 2024-11-23T17:27:22,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741979_1155 (size=600) 2024-11-23T17:27:22,745 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:27:22,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T17:27:22,750 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:27:22,751 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-23T17:27:22,754 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:27:22,754 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-23T17:27:22,758 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 323 msec 2024-11-23T17:27:22,889 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:27:23,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-23T17:27:23,056 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-23T17:27:23,068 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068 2024-11-23T17:27:23,069 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37605, tgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068, rawTgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:23,115 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:23,115 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-23T17:27:23,118 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:27:23,129 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-23T17:27:23,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741980_1156 (size=143) 2024-11-23T17:27:23,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741980_1156 (size=143) 2024-11-23T17:27:23,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741980_1156 (size=143) 2024-11-23T17:27:23,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741981_1157 (size=600) 2024-11-23T17:27:23,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741981_1157 (size=600) 2024-11-23T17:27:23,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741981_1157 (size=600) 2024-11-23T17:27:23,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741982_1158 (size=141) 2024-11-23T17:27:23,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741982_1158 (size=141) 2024-11-23T17:27:23,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741982_1158 (size=141) 2024-11-23T17:27:23,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:23,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:23,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:24,190 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-7766916865192099286.jar 2024-11-23T17:27:24,191 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:24,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:24,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-9414256180238526289.jar 2024-11-23T17:27:24,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:24,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:24,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:24,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:24,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:24,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:24,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:27:24,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:27:24,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 
2024-11-23T17:27:24,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:27:24,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:27:24,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:27:24,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:27:24,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:27:24,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:27:24,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:27:24,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:27:24,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:27:24,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:27:24,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:27:24,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:27:24,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:27:24,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:27:24,266 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:27:24,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741983_1159 (size=131440) 2024-11-23T17:27:24,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741983_1159 (size=131440) 2024-11-23T17:27:24,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741983_1159 (size=131440) 2024-11-23T17:27:24,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741984_1160 (size=4188619) 2024-11-23T17:27:24,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741984_1160 (size=4188619) 2024-11-23T17:27:24,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741984_1160 (size=4188619) 2024-11-23T17:27:24,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741985_1161 (size=1323991) 2024-11-23T17:27:24,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741985_1161 (size=1323991) 2024-11-23T17:27:24,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741985_1161 (size=1323991) 2024-11-23T17:27:24,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741986_1162 (size=903733) 2024-11-23T17:27:24,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741986_1162 (size=903733) 2024-11-23T17:27:24,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741986_1162 (size=903733) 2024-11-23T17:27:24,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741987_1163 (size=8360083) 2024-11-23T17:27:24,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added 
to blk_1073741987_1163 (size=8360083) 2024-11-23T17:27:24,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741987_1163 (size=8360083) 2024-11-23T17:27:24,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741988_1164 (size=1877034) 2024-11-23T17:27:24,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741988_1164 (size=1877034) 2024-11-23T17:27:24,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741988_1164 (size=1877034) 2024-11-23T17:27:24,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741989_1165 (size=77835) 2024-11-23T17:27:24,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741989_1165 (size=77835) 2024-11-23T17:27:24,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741989_1165 (size=77835) 2024-11-23T17:27:24,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741990_1166 (size=6424742) 2024-11-23T17:27:24,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741990_1166 (size=6424742) 2024-11-23T17:27:24,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741990_1166 (size=6424742) 2024-11-23T17:27:24,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741991_1167 (size=30949) 2024-11-23T17:27:24,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741991_1167 (size=30949) 2024-11-23T17:27:24,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741991_1167 (size=30949) 2024-11-23T17:27:24,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741992_1168 (size=1597347) 2024-11-23T17:27:24,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741992_1168 (size=1597347) 2024-11-23T17:27:24,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741992_1168 (size=1597347) 2024-11-23T17:27:24,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741993_1169 (size=4695811) 2024-11-23T17:27:24,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741993_1169 (size=4695811) 2024-11-23T17:27:24,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741993_1169 (size=4695811) 2024-11-23T17:27:24,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45743 is added to blk_1073741994_1170 (size=232957) 2024-11-23T17:27:24,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741994_1170 (size=232957) 2024-11-23T17:27:24,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741994_1170 (size=232957) 2024-11-23T17:27:24,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741995_1171 (size=127628) 2024-11-23T17:27:24,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741995_1171 (size=127628) 2024-11-23T17:27:24,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741995_1171 (size=127628) 2024-11-23T17:27:24,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741996_1172 (size=20406) 2024-11-23T17:27:24,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741996_1172 (size=20406) 2024-11-23T17:27:24,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741996_1172 (size=20406) 2024-11-23T17:27:24,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741997_1173 (size=5175431) 2024-11-23T17:27:24,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741997_1173 (size=5175431) 2024-11-23T17:27:24,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741997_1173 (size=5175431) 2024-11-23T17:27:24,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741998_1174 (size=440957) 2024-11-23T17:27:24,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741998_1174 (size=440957) 2024-11-23T17:27:24,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741998_1174 (size=440957) 2024-11-23T17:27:25,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741999_1175 (size=217634) 2024-11-23T17:27:25,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741999_1175 (size=217634) 2024-11-23T17:27:25,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741999_1175 (size=217634) 2024-11-23T17:27:25,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742000_1176 (size=1832290) 2024-11-23T17:27:25,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742000_1176 (size=1832290) 2024-11-23T17:27:25,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:46761 is added to blk_1073742000_1176 (size=1832290) 2024-11-23T17:27:25,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742001_1177 (size=322274) 2024-11-23T17:27:25,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742001_1177 (size=322274) 2024-11-23T17:27:25,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742001_1177 (size=322274) 2024-11-23T17:27:25,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742002_1178 (size=503880) 2024-11-23T17:27:25,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742002_1178 (size=503880) 2024-11-23T17:27:25,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742002_1178 (size=503880) 2024-11-23T17:27:25,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742003_1179 (size=29229) 2024-11-23T17:27:25,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742003_1179 (size=29229) 2024-11-23T17:27:25,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742003_1179 (size=29229) 2024-11-23T17:27:25,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742004_1180 (size=24096) 2024-11-23T17:27:25,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742004_1180 (size=24096) 2024-11-23T17:27:25,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742004_1180 (size=24096) 2024-11-23T17:27:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742005_1181 (size=111872) 2024-11-23T17:27:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742005_1181 (size=111872) 2024-11-23T17:27:25,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742005_1181 (size=111872) 2024-11-23T17:27:25,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742006_1182 (size=45609) 2024-11-23T17:27:25,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742006_1182 (size=45609) 2024-11-23T17:27:25,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742006_1182 (size=45609) 2024-11-23T17:27:25,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742007_1183 (size=136454) 2024-11-23T17:27:25,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45743 is added to blk_1073742007_1183 (size=136454) 2024-11-23T17:27:25,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742007_1183 (size=136454) 2024-11-23T17:27:25,523 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:27:25,525 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-23T17:27:25,528 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.3 K 2024-11-23T17:27:25,528 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.0 K 2024-11-23T17:27:25,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742008_1184 (size=427) 2024-11-23T17:27:25,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742008_1184 (size=427) 2024-11-23T17:27:25,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742008_1184 (size=427) 2024-11-23T17:27:25,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742009_1185 (size=21) 2024-11-23T17:27:25,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742009_1185 (size=21) 2024-11-23T17:27:25,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742009_1185 (size=21) 2024-11-23T17:27:25,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742010_1186 (size=303994) 2024-11-23T17:27:25,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742010_1186 (size=303994) 2024-11-23T17:27:25,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742010_1186 (size=303994) 2024-11-23T17:27:25,585 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:27:25,585 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:27:26,369 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0003_000001 (auth:SIMPLE) from 127.0.0.1:49732 2024-11-23T17:27:29,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-23T17:27:29,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-23T17:27:32,469 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0003_000001 (auth:SIMPLE) from 127.0.0.1:45760 2024-11-23T17:27:32,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742011_1187 (size=349692) 2024-11-23T17:27:32,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742011_1187 (size=349692) 2024-11-23T17:27:32,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742011_1187 (size=349692) 2024-11-23T17:27:34,774 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0003_000001 (auth:SIMPLE) from 127.0.0.1:44802 2024-11-23T17:27:34,774 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0003_000001 (auth:SIMPLE) from 127.0.0.1:49708 2024-11-23T17:27:38,090 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:27:40,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742012_1188 (size=5149) 2024-11-23T17:27:40,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742012_1188 (size=5149) 2024-11-23T17:27:40,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742012_1188 (size=5149) 2024-11-23T17:27:41,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742014_1190 (size=8460) 2024-11-23T17:27:41,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742014_1190 (size=8460) 2024-11-23T17:27:41,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742014_1190 (size=8460) 2024-11-23T17:27:41,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742013_1189 (size=22124) 2024-11-23T17:27:41,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742013_1189 (size=22124) 2024-11-23T17:27:41,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742013_1189 (size=22124) 2024-11-23T17:27:41,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742015_1191 (size=462) 2024-11-23T17:27:41,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742015_1191 (size=462) 2024-11-23T17:27:41,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742015_1191 (size=462) 2024-11-23T17:27:41,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742016_1192 (size=22124) 2024-11-23T17:27:41,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742016_1192 (size=22124) 2024-11-23T17:27:41,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742016_1192 (size=22124) 2024-11-23T17:27:41,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742017_1193 (size=349692) 2024-11-23T17:27:41,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742017_1193 (size=349692) 2024-11-23T17:27:41,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742017_1193 (size=349692) 2024-11-23T17:27:43,877 WARN [regionserver/215bb12e999f:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 3, running: 1 2024-11-23T17:27:43,989 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:27:43,992 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the 
exported snapshot's expiration status and integrity. 2024-11-23T17:27:44,056 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-11-23T17:27:44,056 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:27:44,058 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:27:44,059 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-23T17:27:44,060 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-23T17:27:44,060 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-23T17:27:44,060 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-23T17:27:44,061 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-23T17:27:44,061 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382843068/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-23T17:27:44,097 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-23T17:27:44,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-23T17:27:44,105 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382864104"}]},"ts":"1732382864104"} 2024-11-23T17:27:44,108 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-23T17:27:44,109 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-23T17:27:44,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T17:27:44,116 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-23T17:27:44,120 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0edf1213fb703278024472158f703420, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=77c6a4d198354393f3bb535244e96b75, UNASSIGN}] 2024-11-23T17:27:44,121 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=77c6a4d198354393f3bb535244e96b75, UNASSIGN 2024-11-23T17:27:44,122 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0edf1213fb703278024472158f703420, UNASSIGN 2024-11-23T17:27:44,124 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=77c6a4d198354393f3bb535244e96b75, regionState=CLOSING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:27:44,124 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=0edf1213fb703278024472158f703420, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:27:44,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=0edf1213fb703278024472158f703420, UNASSIGN because future has completed 2024-11-23T17:27:44,128 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=77c6a4d198354393f3bb535244e96b75, UNASSIGN because future has completed 2024-11-23T17:27:44,136 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:27:44,137 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0edf1213fb703278024472158f703420, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:27:44,137 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:27:44,139 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 77c6a4d198354393f3bb535244e96b75, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:27:44,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T17:27:44,291 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): 
Close 0edf1213fb703278024472158f703420 2024-11-23T17:27:44,292 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:27:44,292 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 0edf1213fb703278024472158f703420, disabling compactions & flushes 2024-11-23T17:27:44,292 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:44,292 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:44,292 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. after waiting 0 ms 2024-11-23T17:27:44,292 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 2024-11-23T17:27:44,294 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:44,294 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:27:44,294 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing 77c6a4d198354393f3bb535244e96b75, disabling compactions & flushes 2024-11-23T17:27:44,294 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 2024-11-23T17:27:44,294 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 2024-11-23T17:27:44,295 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. after waiting 0 ms 2024-11-23T17:27:44,295 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 
2024-11-23T17:27:44,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T17:27:44,460 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:27:44,462 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:27:44,463 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75. 2024-11-23T17:27:44,463 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for 77c6a4d198354393f3bb535244e96b75: Waiting for close lock at 1732382864294Running coprocessor pre-close hooks at 1732382864294Disabling compacts and flushes for region at 1732382864294Disabling writes for close at 1732382864295 (+1 ms)Writing region close event to WAL at 1732382864350 (+55 ms)Running coprocessor post-close hooks at 1732382864462 (+112 ms)Closed at 1732382864463 (+1 ms) 2024-11-23T17:27:44,467 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed 77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:44,468 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=77c6a4d198354393f3bb535244e96b75, regionState=CLOSED 2024-11-23T17:27:44,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 77c6a4d198354393f3bb535244e96b75, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:27:44,477 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:27:44,478 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:27:44,478 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420. 
2024-11-23T17:27:44,478 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 0edf1213fb703278024472158f703420: Waiting for close lock at 1732382864292Running coprocessor pre-close hooks at 1732382864292Disabling compacts and flushes for region at 1732382864292Disabling writes for close at 1732382864292Writing region close event to WAL at 1732382864328 (+36 ms)Running coprocessor post-close hooks at 1732382864478 (+150 ms)Closed at 1732382864478 2024-11-23T17:27:44,482 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 0edf1213fb703278024472158f703420 2024-11-23T17:27:44,484 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=0edf1213fb703278024472158f703420, regionState=CLOSED 2024-11-23T17:27:44,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-11-23T17:27:44,486 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 77c6a4d198354393f3bb535244e96b75, server=215bb12e999f,41065,1732382621000 in 342 msec 2024-11-23T17:27:44,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=77c6a4d198354393f3bb535244e96b75, UNASSIGN in 365 msec 2024-11-23T17:27:44,487 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0edf1213fb703278024472158f703420, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:27:44,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-11-23T17:27:44,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 0edf1213fb703278024472158f703420, server=215bb12e999f,44667,1732382620918 in 352 msec 2024-11-23T17:27:44,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=84, resume processing ppid=83 2024-11-23T17:27:44,498 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=0edf1213fb703278024472158f703420, UNASSIGN in 374 msec 2024-11-23T17:27:44,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-11-23T17:27:44,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 383 msec 2024-11-23T17:27:44,511 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382864511"}]},"ts":"1732382864511"} 2024-11-23T17:27:44,516 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-23T17:27:44,516 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-23T17:27:44,521 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 421 msec 2024-11-23T17:27:44,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-23T17:27:44,737 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-23T17:27:44,739 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-11-23T17:27:44,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:27:44,742 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:27:44,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-23T17:27:44,755 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:27:44,760 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-23T17:27:44,781 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420 2024-11-23T17:27:44,787 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/recovered.edits] 2024-11-23T17:27:44,799 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:44,803 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/cf/ca0413d74dfc4f30af3e8825af96652b to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/cf/ca0413d74dfc4f30af3e8825af96652b 2024-11-23T17:27:44,808 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/recovered.edits/8.seqid to 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420/recovered.edits/8.seqid 2024-11-23T17:27:44,809 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/0edf1213fb703278024472158f703420 2024-11-23T17:27:44,813 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/recovered.edits] 2024-11-23T17:27:44,820 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:27:44,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:27:44,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:27:44,821 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T17:27:44,821 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T17:27:44,823 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/cf/87275cfdd27e41ee93ab5c53a066587b to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/cf/87275cfdd27e41ee93ab5c53a066587b 2024-11-23T17:27:44,826 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-23T17:27:44,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:27:44,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:27:44,830 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:27:44,830 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-23T17:27:44,830 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:44,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:44,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:44,832 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-11-23T17:27:44,832 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T17:27:44,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:44,833 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/recovered.edits/8.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75/recovered.edits/8.seqid 2024-11-23T17:27:44,833 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:44,833 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-11-23T17:27:44,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:44,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:44,834 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportWithResetTtl/77c6a4d198354393f3bb535244e96b75 2024-11-23T17:27:44,835 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-23T17:27:44,838 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:27:44,843 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-23T17:27:44,848 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-23T17:27:44,852 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:27:44,852 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-11-23T17:27:44,853 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382864852"}]},"ts":"9223372036854775807"} 2024-11-23T17:27:44,853 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382864852"}]},"ts":"9223372036854775807"} 2024-11-23T17:27:44,858 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:27:44,858 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0edf1213fb703278024472158f703420, NAME => 'testExportWithResetTtl,,1732382841728.0edf1213fb703278024472158f703420.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 77c6a4d198354393f3bb535244e96b75, NAME => 'testExportWithResetTtl,1,1732382841728.77c6a4d198354393f3bb535244e96b75.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:27:44,858 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 
2024-11-23T17:27:44,859 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382864858"}]},"ts":"9223372036854775807"} 2024-11-23T17:27:44,864 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-23T17:27:44,865 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-23T17:27:44,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 127 msec 2024-11-23T17:27:44,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-11-23T17:27:44,937 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-23T17:27:44,938 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-23T17:27:44,939 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-23T17:27:44,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:27:44,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-23T17:27:44,951 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382864951"}]},"ts":"1732382864951"} 2024-11-23T17:27:44,960 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-23T17:27:44,960 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-23T17:27:44,962 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-23T17:27:44,966 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=efe67ed8d86346ec0aee1231aa1ea894, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=da60fa86fdbf8f39601828babeb53202, UNASSIGN}] 2024-11-23T17:27:44,968 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=da60fa86fdbf8f39601828babeb53202, UNASSIGN 2024-11-23T17:27:44,969 INFO [PEWorker-4 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=efe67ed8d86346ec0aee1231aa1ea894, UNASSIGN 2024-11-23T17:27:44,971 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=efe67ed8d86346ec0aee1231aa1ea894, regionState=CLOSING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:27:44,972 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=da60fa86fdbf8f39601828babeb53202, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:27:44,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=efe67ed8d86346ec0aee1231aa1ea894, UNASSIGN because future has completed 2024-11-23T17:27:44,976 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:27:44,976 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure efe67ed8d86346ec0aee1231aa1ea894, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:27:44,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=da60fa86fdbf8f39601828babeb53202, UNASSIGN because future has completed 2024-11-23T17:27:44,980 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:27:44,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure da60fa86fdbf8f39601828babeb53202, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:27:45,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-23T17:27:45,137 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:27:45,138 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing da60fa86fdbf8f39601828babeb53202, disabling compactions & flushes 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] 
regionserver.HRegion(1722): Closing efe67ed8d86346ec0aee1231aa1ea894, disabling compactions & flushes 2024-11-23T17:27:45,138 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:45,138 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. after waiting 0 ms 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. after waiting 0 ms 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 2024-11-23T17:27:45,138 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 2024-11-23T17:27:45,203 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:27:45,207 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:27:45,207 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202. 
2024-11-23T17:27:45,207 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for da60fa86fdbf8f39601828babeb53202: Waiting for close lock at 1732382865138Running coprocessor pre-close hooks at 1732382865138Disabling compacts and flushes for region at 1732382865138Disabling writes for close at 1732382865138Writing region close event to WAL at 1732382865182 (+44 ms)Running coprocessor post-close hooks at 1732382865207 (+25 ms)Closed at 1732382865207 2024-11-23T17:27:45,210 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:45,211 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=da60fa86fdbf8f39601828babeb53202, regionState=CLOSED 2024-11-23T17:27:45,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure da60fa86fdbf8f39601828babeb53202, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:27:45,217 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:27:45,218 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:27:45,218 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894. 
2024-11-23T17:27:45,219 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for efe67ed8d86346ec0aee1231aa1ea894: Waiting for close lock at 1732382865138Running coprocessor pre-close hooks at 1732382865138Disabling compacts and flushes for region at 1732382865138Disabling writes for close at 1732382865138Writing region close event to WAL at 1732382865200 (+62 ms)Running coprocessor post-close hooks at 1732382865218 (+18 ms)Closed at 1732382865218 2024-11-23T17:27:45,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=92 2024-11-23T17:27:45,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure da60fa86fdbf8f39601828babeb53202, server=215bb12e999f,44667,1732382620918 in 235 msec 2024-11-23T17:27:45,222 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:45,223 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=da60fa86fdbf8f39601828babeb53202, UNASSIGN in 255 msec 2024-11-23T17:27:45,223 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=efe67ed8d86346ec0aee1231aa1ea894, regionState=CLOSED 2024-11-23T17:27:45,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure efe67ed8d86346ec0aee1231aa1ea894, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:27:45,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=91 2024-11-23T17:27:45,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure efe67ed8d86346ec0aee1231aa1ea894, server=215bb12e999f,41065,1732382621000 in 251 msec 2024-11-23T17:27:45,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=90 2024-11-23T17:27:45,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=efe67ed8d86346ec0aee1231aa1ea894, UNASSIGN in 266 msec 2024-11-23T17:27:45,240 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-11-23T17:27:45,240 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 274 msec 2024-11-23T17:27:45,242 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382865242"}]},"ts":"1732382865242"} 2024-11-23T17:27:45,248 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-23T17:27:45,248 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-23T17:27:45,259 
INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 311 msec 2024-11-23T17:27:45,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-23T17:27:45,268 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:27:45,269 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-23T17:27:45,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:27:45,273 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:27:45,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-23T17:27:45,275 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:27:45,279 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-23T17:27:45,288 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:45,294 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/recovered.edits] 2024-11-23T17:27:45,309 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/cf/b0a2567cb00d44208980210d938dd942 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/cf/b0a2567cb00d44208980210d938dd942 2024-11-23T17:27:45,314 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894/recovered.edits/9.seqid 
2024-11-23T17:27:45,316 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/efe67ed8d86346ec0aee1231aa1ea894 2024-11-23T17:27:45,316 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:45,319 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/recovered.edits] 2024-11-23T17:27:45,327 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/cf/5f9c791b0e484b74802379211e6cc15f to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/cf/5f9c791b0e484b74802379211e6cc15f 2024-11-23T17:27:45,332 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202/recovered.edits/9.seqid 2024-11-23T17:27:45,333 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithResetTtl/da60fa86fdbf8f39601828babeb53202 2024-11-23T17:27:45,333 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-23T17:27:45,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:27:45,336 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:27:45,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:27:45,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:27:45,337 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T17:27:45,337 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T17:27:45,338 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-23T17:27:45,343 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:27:45,347 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:45,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:27:45,348 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-11-23T17:27:45,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:45,348 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T17:27:45,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:27:45,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:45,348 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-23T17:27:45,348 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:45,350 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-23T17:27:45,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-11-23T17:27:45,354 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 
2024-11-23T17:27:45,356 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:27:45,356 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-11-23T17:27:45,356 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382865356"}]},"ts":"9223372036854775807"} 2024-11-23T17:27:45,356 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382865356"}]},"ts":"9223372036854775807"} 2024-11-23T17:27:45,360 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:27:45,361 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => efe67ed8d86346ec0aee1231aa1ea894, NAME => 'testtb-testExportWithResetTtl,,1732382838210.efe67ed8d86346ec0aee1231aa1ea894.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => da60fa86fdbf8f39601828babeb53202, NAME => 'testtb-testExportWithResetTtl,1,1732382838210.da60fa86fdbf8f39601828babeb53202.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:27:45,361 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-11-23T17:27:45,361 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382865361"}]},"ts":"9223372036854775807"} 2024-11-23T17:27:45,365 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-23T17:27:45,366 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-23T17:27:45,369 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 97 msec 2024-11-23T17:27:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-11-23T17:27:45,457 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-23T17:27:45,457 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-23T17:27:45,476 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-23T17:27:45,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-23T17:27:45,485 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb-testExportWithResetTtl" type: DISABLED 2024-11-23T17:27:45,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-23T17:27:45,491 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-23T17:27:45,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-23T17:27:45,570 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=792 (was 783) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2866 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) 
Potentially hanging thread: process reaper (pid 106472) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:33032 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35379 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32987 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:32987 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:57804 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:36999 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:57006 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=809 (was 807) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1386 (was 1339) - SystemLoadAverage LEAK? -, ProcessCount=32 (was 20) - ProcessCount LEAK? -, AvailableMemoryMB=865 (was 2352) 2024-11-23T17:27:45,570 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-11-23T17:27:45,636 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=792, OpenFileDescriptor=809, MaxFileDescriptor=1048576, SystemLoadAverage=1386, ProcessCount=32, AvailableMemoryMB=859 2024-11-23T17:27:45,636 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-11-23T17:27:45,638 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:27:45,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:27:45,643 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:27:45,644 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:27:45,644 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-11-23T17:27:45,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T17:27:45,645 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:27:45,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T17:27:45,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742018_1194 (size=407) 2024-11-23T17:27:45,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742018_1194 (size=407) 2024-11-23T17:27:45,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44591 is added to blk_1073742018_1194 (size=407) 2024-11-23T17:27:45,827 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => e2696e8b9a02ff803f56da2561e06e84, NAME => 'testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:45,836 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => d95e3a1e354a49895c35d856b3add21e, NAME => 'testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:45,944 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_3/usercache/jenkins/appcache/application_1732382632246_0003/container_1732382632246_0003_01_000003/launch_container.sh] 2024-11-23T17:27:45,944 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_3/usercache/jenkins/appcache/application_1732382632246_0003/container_1732382632246_0003_01_000003/container_tokens] 2024-11-23T17:27:45,944 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_3/usercache/jenkins/appcache/application_1732382632246_0003/container_1732382632246_0003_01_000003/sysfs] 2024-11-23T17:27:45,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T17:27:45,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742019_1195 (size=68) 2024-11-23T17:27:45,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is 
added to blk_1073742019_1195 (size=68) 2024-11-23T17:27:45,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742019_1195 (size=68) 2024-11-23T17:27:45,974 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:45,974 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing e2696e8b9a02ff803f56da2561e06e84, disabling compactions & flushes 2024-11-23T17:27:45,974 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 2024-11-23T17:27:45,974 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 2024-11-23T17:27:45,974 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. after waiting 0 ms 2024-11-23T17:27:45,974 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 2024-11-23T17:27:45,975 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 
2024-11-23T17:27:45,975 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for e2696e8b9a02ff803f56da2561e06e84: Waiting for close lock at 1732382865974Disabling compacts and flushes for region at 1732382865974Disabling writes for close at 1732382865974Writing region close event to WAL at 1732382865974Closed at 1732382865974 2024-11-23T17:27:46,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742020_1196 (size=68) 2024-11-23T17:27:46,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742020_1196 (size=68) 2024-11-23T17:27:46,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742020_1196 (size=68) 2024-11-23T17:27:46,014 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:46,014 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing d95e3a1e354a49895c35d856b3add21e, disabling compactions & flushes 2024-11-23T17:27:46,014 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:27:46,014 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:27:46,014 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. after waiting 0 ms 2024-11-23T17:27:46,014 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:27:46,014 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 
2024-11-23T17:27:46,014 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for d95e3a1e354a49895c35d856b3add21e: Waiting for close lock at 1732382866014Disabling compacts and flushes for region at 1732382866014Disabling writes for close at 1732382866014Writing region close event to WAL at 1732382866014Closed at 1732382866014 2024-11-23T17:27:46,017 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:27:46,018 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732382866017"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382866017"}]},"ts":"1732382866017"} 2024-11-23T17:27:46,018 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732382866017"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382866017"}]},"ts":"1732382866017"} 2024-11-23T17:27:46,028 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:27:46,029 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:27:46,029 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382866029"}]},"ts":"1732382866029"} 2024-11-23T17:27:46,035 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-23T17:27:46,036 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:27:46,040 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:27:46,040 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:27:46,040 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:27:46,040 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:27:46,040 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:27:46,040 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:27:46,040 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:27:46,040 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:27:46,040 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:27:46,040 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:27:46,041 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2696e8b9a02ff803f56da2561e06e84, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e, ASSIGN}] 2024-11-23T17:27:46,043 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2696e8b9a02ff803f56da2561e06e84, ASSIGN 2024-11-23T17:27:46,045 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2696e8b9a02ff803f56da2561e06e84, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:27:46,047 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e, ASSIGN 2024-11-23T17:27:46,057 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e, ASSIGN; state=OFFLINE, location=215bb12e999f,45467,1732382620632; forceNewPlan=false, retain=false 2024-11-23T17:27:46,196 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:27:46,197 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=d95e3a1e354a49895c35d856b3add21e, regionState=OPENING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:27:46,198 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=e2696e8b9a02ff803f56da2561e06e84, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:27:46,205 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e, ASSIGN because future has completed 2024-11-23T17:27:46,205 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure d95e3a1e354a49895c35d856b3add21e, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:27:46,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2696e8b9a02ff803f56da2561e06e84, ASSIGN because future has completed 2024-11-23T17:27:46,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2696e8b9a02ff803f56da2561e06e84, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:27:46,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T17:27:46,372 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:27:46,372 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => d95e3a1e354a49895c35d856b3add21e, NAME => 'testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:27:46,372 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. service=AccessControlService 2024-11-23T17:27:46,372 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:27:46,373 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,373 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:46,373 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,373 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,390 INFO [StoreOpener-d95e3a1e354a49895c35d856b3add21e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,394 INFO [StoreOpener-d95e3a1e354a49895c35d856b3add21e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d95e3a1e354a49895c35d856b3add21e columnFamilyName cf 2024-11-23T17:27:46,394 DEBUG [StoreOpener-d95e3a1e354a49895c35d856b3add21e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:27:46,395 INFO [StoreOpener-d95e3a1e354a49895c35d856b3add21e-1 {}] regionserver.HStore(327): Store=d95e3a1e354a49895c35d856b3add21e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:27:46,395 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,396 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,397 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,398 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,398 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,399 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 2024-11-23T17:27:46,399 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => e2696e8b9a02ff803f56da2561e06e84, NAME => 'testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:27:46,399 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. service=AccessControlService 2024-11-23T17:27:46,400 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:27:46,400 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,400 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:27:46,400 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,400 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,401 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,428 INFO [StoreOpener-e2696e8b9a02ff803f56da2561e06e84-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,437 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:27:46,438 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened d95e3a1e354a49895c35d856b3add21e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73418628, jitterRate=0.09402281045913696}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:27:46,439 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:46,440 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for d95e3a1e354a49895c35d856b3add21e: Running coprocessor pre-open hook at 1732382866373Writing region info on filesystem at 1732382866373Initializing all the Stores at 1732382866375 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382866375Cleaning up temporary data from old regions at 1732382866398 (+23 ms)Running coprocessor post-open hooks at 1732382866439 (+41 ms)Region opened successfully at 1732382866440 (+1 ms) 2024-11-23T17:27:46,440 INFO [StoreOpener-e2696e8b9a02ff803f56da2561e06e84-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e2696e8b9a02ff803f56da2561e06e84 columnFamilyName cf 2024-11-23T17:27:46,440 DEBUG [StoreOpener-e2696e8b9a02ff803f56da2561e06e84-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:27:46,442 INFO [StoreOpener-e2696e8b9a02ff803f56da2561e06e84-1 {}] regionserver.HStore(327): Store=e2696e8b9a02ff803f56da2561e06e84/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:27:46,442 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,443 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,444 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,444 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,444 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,446 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,452 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e., pid=99, masterSystemTime=1732382866367 2024-11-23T17:27:46,456 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:27:46,456 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 
2024-11-23T17:27:46,457 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=d95e3a1e354a49895c35d856b3add21e, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:27:46,457 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:27:46,458 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened e2696e8b9a02ff803f56da2561e06e84; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64941458, jitterRate=-0.03229686617851257}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:27:46,459 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:46,459 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for e2696e8b9a02ff803f56da2561e06e84: Running coprocessor pre-open hook at 1732382866400Writing region info on filesystem at 1732382866400Initializing all the Stores at 1732382866401 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382866401Cleaning up temporary data from old regions at 1732382866444 (+43 ms)Running coprocessor post-open hooks at 1732382866459 (+15 ms)Region opened successfully at 1732382866459 2024-11-23T17:27:46,461 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84., pid=100, masterSystemTime=1732382866369 2024-11-23T17:27:46,461 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=215bb12e999f,45467,1732382620632, table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-23T17:27:46,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure d95e3a1e354a49895c35d856b3add21e, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:27:46,464 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 
2024-11-23T17:27:46,464 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 2024-11-23T17:27:46,464 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=e2696e8b9a02ff803f56da2561e06e84, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:27:46,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure e2696e8b9a02ff803f56da2561e06e84, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:27:46,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-11-23T17:27:46,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure d95e3a1e354a49895c35d856b3add21e, server=215bb12e999f,45467,1732382620632 in 262 msec 2024-11-23T17:27:46,482 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-11-23T17:27:46,482 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure e2696e8b9a02ff803f56da2561e06e84, server=215bb12e999f,44667,1732382620918 in 266 msec 2024-11-23T17:27:46,485 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e, ASSIGN in 438 msec 2024-11-23T17:27:46,489 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:27:46,489 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382866489"}]},"ts":"1732382866489"} 2024-11-23T17:27:46,490 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-11-23T17:27:46,490 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2696e8b9a02ff803f56da2561e06e84, ASSIGN in 441 msec 2024-11-23T17:27:46,492 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-23T17:27:46,494 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:27:46,495 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-23T17:27:46,500 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:27:46,515 DEBUG [pool-63-thread-1-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:46,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:46,515 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:46,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:27:46,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:46,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:46,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:46,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:27:46,544 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 899 msec 2024-11-23T17:27:46,708 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0003/container_1732382632246_0003_01_000002/launch_container.sh] 2024-11-23T17:27:46,709 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0003/container_1732382632246_0003_01_000002/container_tokens] 2024-11-23T17:27:46,709 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0003/container_1732382632246_0003_01_000002/sysfs] 2024-11-23T17:27:46,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-23T17:27:46,787 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:27:46,787 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-11-23T17:27:46,788 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:27:46,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-11-23T17:27:46,795 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:27:46,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 2024-11-23T17:27:46,795 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:27:46,804 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:27:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382866804 (current time:1732382866804). 
2024-11-23T17:27:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:27:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-23T17:27:46,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:27:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@551bc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:46,818 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:46,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:46,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:46,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72f82a18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:46,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:46,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:46,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:46,822 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36102, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:46,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fc1a22a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:46,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:46,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:46,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:46,831 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35556, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:46,837 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:27:46,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:46,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:46,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:46,841 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:27:46,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57cc5e93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:46,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:46,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:46,852 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:46,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:46,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:46,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@147f4412, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:46,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:46,853 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:46,854 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:46,857 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36130, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:46,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25508475, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:46,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:46,860 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:46,861 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:46,862 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35560, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:27:46,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:46,865 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:46,868 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37212, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:46,870 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:27:46,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:46,870 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:46,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:46,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:27:46,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:27:46,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:27:46,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-23T17:27:46,874 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:27:46,876 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:27:46,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T17:27:46,877 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:27:46,881 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:27:46,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T17:27:47,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742021_1197 (size=170) 2024-11-23T17:27:47,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742021_1197 (size=170) 2024-11-23T17:27:47,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742021_1197 (size=170) 2024-11-23T17:27:47,077 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:27:47,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2696e8b9a02ff803f56da2561e06e84}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d95e3a1e354a49895c35d856b3add21e}] 2024-11-23T17:27:47,078 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:47,079 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:47,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T17:27:47,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-11-23T17:27:47,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-23T17:27:47,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:27:47,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 2024-11-23T17:27:47,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for d95e3a1e354a49895c35d856b3add21e: 2024-11-23T17:27:47,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. for emptySnaptb0-testExportFileSystemState completed. 2024-11-23T17:27:47,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for e2696e8b9a02ff803f56da2561e06e84: 2024-11-23T17:27:47,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. for emptySnaptb0-testExportFileSystemState completed. 2024-11-23T17:27:47,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-23T17:27:47,232 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-23T17:27:47,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:47,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:47,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:27:47,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:27:47,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742023_1199 (size=71) 2024-11-23T17:27:47,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742023_1199 (size=71) 2024-11-23T17:27:47,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742022_1198 (size=71) 2024-11-23T17:27:47,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742022_1198 (size=71) 2024-11-23T17:27:47,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742023_1199 (size=71) 2024-11-23T17:27:47,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742022_1198 (size=71) 2024-11-23T17:27:47,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T17:27:47,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 
2024-11-23T17:27:47,656 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-11-23T17:27:47,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-11-23T17:27:47,657 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:47,657 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:47,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:27:47,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-23T17:27:47,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-23T17:27:47,660 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:47,660 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:47,678 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e2696e8b9a02ff803f56da2561e06e84 in 587 msec 2024-11-23T17:27:47,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-11-23T17:27:47,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d95e3a1e354a49895c35d856b3add21e in 591 msec 2024-11-23T17:27:47,679 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:27:47,681 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:27:47,682 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 
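[editor note] The repeated "Checking to see if procedure is done pid=101" lines are the client polling the master while the snapshot procedure walks through its states. A hedged sketch of doing the same thing explicitly with the asynchronous Admin variant; it assumes the snapshotAsync/isSnapshotFinished methods present in recent HBase client APIs, whose exact signatures may differ between releases.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotPoll {
  // Submit the snapshot without blocking, then poll the master, mirroring the
  // "Checking to see if procedure is done" lines in this log.
  static void snapshotAndPoll(Admin admin) throws Exception {
    SnapshotDescription desc = new SnapshotDescription(
        "emptySnaptb0-testExportFileSystemState",
        TableName.valueOf("testtb-testExportFileSystemState"),
        SnapshotType.FLUSH);
    admin.snapshotAsync(desc);
    while (!admin.isSnapshotFinished(desc)) {
      Thread.sleep(200); // polling interval chosen arbitrarily for the sketch
    }
  }
}
```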
2024-11-23T17:27:47,682 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-23T17:27:47,684 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-23T17:27:47,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742024_1200 (size=552) 2024-11-23T17:27:47,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742024_1200 (size=552) 2024-11-23T17:27:47,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742024_1200 (size=552) 2024-11-23T17:27:47,847 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:27:47,874 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:27:47,875 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-23T17:27:47,878 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:27:47,879 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-23T17:27:47,884 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 1.0070 sec 2024-11-23T17:27:48,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-23T17:27:48,017 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:27:48,028 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', 
row='1d7d16edb5d78e228b239732f441b9333', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:27:48,029 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='0924ab5be24f4b51c8cbb7f56cf9bd554', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:27:48,031 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='25ec308632011c55c850f1b68fac52247', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:27:48,032 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='3681a7b9fc31d37ff9ce052c4a67476e0', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:27:48,036 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='4df0038106b2bf2c55247f461ec9ea0c9', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:27:48,043 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0003_000001 (auth:SIMPLE) from 127.0.0.1:46510 2024-11-23T17:27:48,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:27:48,068 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:48,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58848, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:48,102 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45467 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:27:48,105 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:27:48,111 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-23T17:27:48,111 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 
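[editor note] The "writing data to region ... with WAL disabled" warnings above correspond to the test loading rows with durability turned off before taking the second snapshot. A minimal sketch of such a write follows; the family "cf" and qualifier "q" come from this log, while the row key and value are placeholders. Skipping the WAL is purely a speed trade-off and, as the warning says, loses data if the region server crashes.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WalDisabledWrite {
  static void writeRow(Connection conn, byte[] rowKey) throws Exception {
    try (Table table = conn.getTable(
        TableName.valueOf("testtb-testExportFileSystemState"))) {
      Put put = new Put(rowKey)
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Produces the "Data may be lost in the event of a crash" warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```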
2024-11-23T17:27:48,111 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:27:48,114 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:27:48,122 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:27:48,135 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:27:48,138 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:27:48,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382868138 (current time:1732382868138). 2024-11-23T17:27:48,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:27:48,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-23T17:27:48,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:27:48,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20de69b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:48,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:48,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:48,147 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:48,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:48,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:48,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a88f0ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-23T17:27:48,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:48,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:48,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:48,149 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36144, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:48,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f5de199, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:48,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:48,152 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:48,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:48,154 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:48,156 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
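[editor note] The lines above show a short-lived client connection being assembled: fetch the cluster id from the connection registry, locate hbase:meta, then resolve per-row region locations (the AsyncNonMetaRegionLocator entries). From application code that lookup is a single RegionLocator call; a small sketch, assuming an already-open client Connection.

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegion {
  static void printLocation(Connection conn, String row) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(
        TableName.valueOf("testtb-testExportFileSystemState"))) {
      // Same information the locator log lines report: region name,
      // hosting region server, and seqNum.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(row));
      System.out.println(loc.getRegion().getRegionNameAsString()
          + " on " + loc.getServerName());
    }
  }
}
```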
2024-11-23T17:27:48,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:48,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:48,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:48,157 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:27:48,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a61d4d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:48,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:27:48,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:27:48,166 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:27:48,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:27:48,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:27:48,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@283ca140, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:48,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:27:48,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:27:48,169 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36158, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:27:48,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a358d65, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:27:48,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:48,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:27:48,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:27:48,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:48,174 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35572, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:48,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:27:48,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:27:48,184 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37226, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:27:48,189 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:27:48,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:27:48,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:48,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:27:48,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:27:48,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:27:48,198 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
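[editor note] Before registering the snapshot procedure the master reads the table's ACL entry ("kv [jenkins: RWXCA]") so the permissions can be written into the snapshot description. A hedged sketch of inspecting those permissions from a client, assuming the AccessControlClient helper that is available when the AccessController coprocessor is enabled; overload signatures vary between HBase versions, so treat this as an approximation.

```java
import java.util.List;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowAcls {
  static void printTablePermissions(Connection conn) throws Throwable {
    // Matches the acl entry read above: user "jenkins" holding RWXCA on the table.
    List<UserPermission> perms =
        AccessControlClient.getUserPermissions(conn, "testtb-testExportFileSystemState");
    for (UserPermission p : perms) {
      System.out.println(p);
    }
  }
}
```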
2024-11-23T17:27:48,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:27:48,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-23T17:27:48,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T17:27:48,203 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:27:48,205 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:27:48,208 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:27:48,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T17:27:48,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742025_1201 (size=165) 2024-11-23T17:27:48,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742025_1201 (size=165) 2024-11-23T17:27:48,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742025_1201 (size=165) 2024-11-23T17:27:48,317 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:27:48,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2696e8b9a02ff803f56da2561e06e84}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d95e3a1e354a49895c35d856b3add21e}] 2024-11-23T17:27:48,319 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:48,319 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took 
xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:48,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-11-23T17:27:48,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-11-23T17:27:48,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:27:48,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 2024-11-23T17:27:48,471 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing d95e3a1e354a49895c35d856b3add21e 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-23T17:27:48,472 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing e2696e8b9a02ff803f56da2561e06e84 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-23T17:27:48,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/.tmp/cf/1aad4ed5ef6d43d7946cffe5f8c7917c is 71, key is 18b47c8c7b65a2dcac6aab18ac6da153/cf:q/1732382868101/Put/seqid=0 2024-11-23T17:27:48,497 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/.tmp/cf/40ff0bbee899478685c99d57ba4c658a is 71, key is 0daf91be1c4fed431b312850eb4e2da5/cf:q/1732382868066/Put/seqid=0 2024-11-23T17:27:48,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T17:27:48,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742027_1203 (size=5288) 2024-11-23T17:27:48,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742027_1203 (size=5288) 2024-11-23T17:27:48,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742027_1203 (size=5288) 2024-11-23T17:27:48,601 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/.tmp/cf/40ff0bbee899478685c99d57ba4c658a 2024-11-23T17:27:48,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742026_1202 (size=8324) 2024-11-23T17:27:48,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742026_1202 (size=8324) 2024-11-23T17:27:48,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742026_1202 (size=8324) 2024-11-23T17:27:48,618 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/.tmp/cf/40ff0bbee899478685c99d57ba4c658a as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/cf/40ff0bbee899478685c99d57ba4c658a 2024-11-23T17:27:48,627 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/cf/40ff0bbee899478685c99d57ba4c658a, entries=3, sequenceid=6, filesize=5.2 K 2024-11-23T17:27:48,629 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for e2696e8b9a02ff803f56da2561e06e84 in 157ms, sequenceid=6, compaction requested=false 2024-11-23T17:27:48,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-23T17:27:48,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for e2696e8b9a02ff803f56da2561e06e84: 2024-11-23T17:27:48,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. for snaptb0-testExportFileSystemState completed. 2024-11-23T17:27:48,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-23T17:27:48,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:48,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/cf/40ff0bbee899478685c99d57ba4c658a] hfiles 2024-11-23T17:27:48,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/cf/40ff0bbee899478685c99d57ba4c658a for snapshot=snaptb0-testExportFileSystemState 2024-11-23T17:27:48,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742028_1204 (size=110) 2024-11-23T17:27:48,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742028_1204 (size=110) 2024-11-23T17:27:48,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742028_1204 (size=110) 2024-11-23T17:27:48,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 
2024-11-23T17:27:48,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-23T17:27:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-11-23T17:27:48,676 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:48,676 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:27:48,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e2696e8b9a02ff803f56da2561e06e84 in 360 msec 2024-11-23T17:27:48,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T17:27:49,056 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/.tmp/cf/1aad4ed5ef6d43d7946cffe5f8c7917c 2024-11-23T17:27:49,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/.tmp/cf/1aad4ed5ef6d43d7946cffe5f8c7917c as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/cf/1aad4ed5ef6d43d7946cffe5f8c7917c 2024-11-23T17:27:49,079 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/cf/1aad4ed5ef6d43d7946cffe5f8c7917c, entries=47, sequenceid=6, filesize=8.1 K 2024-11-23T17:27:49,080 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for d95e3a1e354a49895c35d856b3add21e in 609ms, sequenceid=6, compaction requested=false 2024-11-23T17:27:49,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for d95e3a1e354a49895c35d856b3add21e: 2024-11-23T17:27:49,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. for snaptb0-testExportFileSystemState completed. 
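[editor note] For a FLUSH-type snapshot (pid=105/106 here) each region first flushes its memstore so the manifest can reference a concrete hfile, such as 40ff0bbee899478685c99d57ba4c658a above. The snapshot procedure performs that flush itself; the sketch below shows an explicit client-side flush of the same table, included only as an illustration of the underlying operation.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class FlushTable {
  static void flush(Admin admin) throws Exception {
    // Forces memstore contents into hfiles, like the per-region flushes logged above.
    admin.flush(TableName.valueOf("testtb-testExportFileSystemState"));
  }
}
```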
2024-11-23T17:27:49,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-23T17:27:49,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:27:49,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/cf/1aad4ed5ef6d43d7946cffe5f8c7917c] hfiles 2024-11-23T17:27:49,080 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/cf/1aad4ed5ef6d43d7946cffe5f8c7917c for snapshot=snaptb0-testExportFileSystemState 2024-11-23T17:27:49,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742029_1205 (size=110) 2024-11-23T17:27:49,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742029_1205 (size=110) 2024-11-23T17:27:49,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742029_1205 (size=110) 2024-11-23T17:27:49,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 
2024-11-23T17:27:49,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-11-23T17:27:49,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-11-23T17:27:49,103 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:49,104 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:27:49,108 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-11-23T17:27:49,108 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:27:49,108 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d95e3a1e354a49895c35d856b3add21e in 787 msec 2024-11-23T17:27:49,109 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:27:49,110 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:27:49,110 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-23T17:27:49,111 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-23T17:27:49,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742030_1206 (size=630) 2024-11-23T17:27:49,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742030_1206 (size=630) 2024-11-23T17:27:49,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742030_1206 (size=630) 2024-11-23T17:27:49,155 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:27:49,162 INFO 
[PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:27:49,163 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-23T17:27:49,166 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:27:49,166 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-23T17:27:49,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 975 msec 2024-11-23T17:27:49,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-23T17:27:49,345 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:27:49,346 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345 2024-11-23T17:27:49,346 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37605, tgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345, rawTgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:49,387 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:27:49,387 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-23T17:27:49,399 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): 
Verify the source snapshot's expiration status and integrity. 2024-11-23T17:27:49,410 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-23T17:27:49,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742031_1207 (size=165) 2024-11-23T17:27:49,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742031_1207 (size=165) 2024-11-23T17:27:49,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742031_1207 (size=165) 2024-11-23T17:27:49,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742032_1208 (size=630) 2024-11-23T17:27:49,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742032_1208 (size=630) 2024-11-23T17:27:49,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742032_1208 (size=630) 2024-11-23T17:27:49,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:49,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:49,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:50,001 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:27:50,046 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-23T17:27:50,046 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-23T17:27:50,047 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-23T17:27:50,047 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-23T17:27:50,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-13017505959807543578.jar 2024-11-23T17:27:50,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:50,533 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:50,602 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-3762718067758330216.jar 2024-11-23T17:27:50,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:50,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:50,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:50,603 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:50,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:50,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:27:50,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:27:50,604 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:27:50,605 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:27:50,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:27:50,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:27:50,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:27:50,605 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:27:50,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:27:50,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:27:50,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:27:50,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:27:50,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:27:50,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:27:50,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:27:50,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:27:50,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:27:50,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:27:50,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:27:50,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742033_1209 (size=131440) 2024-11-23T17:27:50,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742033_1209 (size=131440) 2024-11-23T17:27:50,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742033_1209 (size=131440) 2024-11-23T17:27:50,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742034_1210 (size=4188619) 2024-11-23T17:27:50,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742034_1210 (size=4188619) 2024-11-23T17:27:50,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742034_1210 (size=4188619) 2024-11-23T17:27:50,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742035_1211 (size=1323991) 2024-11-23T17:27:50,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742035_1211 (size=1323991) 2024-11-23T17:27:50,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742035_1211 (size=1323991) 2024-11-23T17:27:50,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742036_1212 (size=903733) 2024-11-23T17:27:50,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742036_1212 (size=903733) 2024-11-23T17:27:50,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742036_1212 (size=903733) 
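[Editor's note] The TableMapReduceUtil(972) entries above show the export job's setup step resolving, for each referenced class (HConstants, ClientProtos, ZKWatcher, the shaded thirdparty libraries, the Hadoop IO classes, and so on), the jar that provides it; the surrounding addStoredBlock entries are those jars being written into HDFS as resources for the MapReduce job. A minimal sketch of the same dependency-shipping helper is below; it is illustrative only (the job name and configuration are not taken from the test, and ExportSnapshot performs the equivalent call internally when it submits its copy job, typically driven via the ExportSnapshot tool with -snapshot and -copy-to arguments).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ShipDependencyJars {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative job name; the test's actual job is created inside ExportSnapshot.
    Job job = Job.getInstance(conf, "export-snapshot-example");
    // Resolves the jar containing each HBase/Hadoop class the job needs and attaches it
    // to the job's classpath via the distributed cache; this is what produces the
    // "For class X, using jar Y" entries in the log above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}

[End of editor's note]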
2024-11-23T17:27:50,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742037_1213 (size=8360083) 2024-11-23T17:27:50,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742037_1213 (size=8360083) 2024-11-23T17:27:50,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742037_1213 (size=8360083) 2024-11-23T17:27:50,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742038_1214 (size=1877034) 2024-11-23T17:27:50,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742038_1214 (size=1877034) 2024-11-23T17:27:50,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742038_1214 (size=1877034) 2024-11-23T17:27:50,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742039_1215 (size=77835) 2024-11-23T17:27:50,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742039_1215 (size=77835) 2024-11-23T17:27:50,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742039_1215 (size=77835) 2024-11-23T17:27:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742040_1216 (size=6424742) 2024-11-23T17:27:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742040_1216 (size=6424742) 2024-11-23T17:27:50,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742040_1216 (size=6424742) 2024-11-23T17:27:50,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742041_1217 (size=30949) 2024-11-23T17:27:50,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742041_1217 (size=30949) 2024-11-23T17:27:50,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742041_1217 (size=30949) 2024-11-23T17:27:50,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742042_1218 (size=1597347) 2024-11-23T17:27:50,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742042_1218 (size=1597347) 2024-11-23T17:27:50,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742042_1218 (size=1597347) 2024-11-23T17:27:50,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742043_1219 (size=4695811) 2024-11-23T17:27:50,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742043_1219 
(size=4695811) 2024-11-23T17:27:50,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742043_1219 (size=4695811) 2024-11-23T17:27:50,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742044_1220 (size=232957) 2024-11-23T17:27:50,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742044_1220 (size=232957) 2024-11-23T17:27:50,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742044_1220 (size=232957) 2024-11-23T17:27:50,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742045_1221 (size=127628) 2024-11-23T17:27:50,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742045_1221 (size=127628) 2024-11-23T17:27:50,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742045_1221 (size=127628) 2024-11-23T17:27:50,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742046_1222 (size=20406) 2024-11-23T17:27:50,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742046_1222 (size=20406) 2024-11-23T17:27:50,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742046_1222 (size=20406) 2024-11-23T17:27:50,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742047_1223 (size=5175431) 2024-11-23T17:27:50,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742047_1223 (size=5175431) 2024-11-23T17:27:50,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742047_1223 (size=5175431) 2024-11-23T17:27:50,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742048_1224 (size=217634) 2024-11-23T17:27:50,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742048_1224 (size=217634) 2024-11-23T17:27:50,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742048_1224 (size=217634) 2024-11-23T17:27:50,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742049_1225 (size=1832290) 2024-11-23T17:27:50,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742049_1225 (size=1832290) 2024-11-23T17:27:50,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742049_1225 (size=1832290) 2024-11-23T17:27:50,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to 
blk_1073742050_1226 (size=322274) 2024-11-23T17:27:50,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742050_1226 (size=322274) 2024-11-23T17:27:50,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742050_1226 (size=322274) 2024-11-23T17:27:50,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742051_1227 (size=503880) 2024-11-23T17:27:50,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742051_1227 (size=503880) 2024-11-23T17:27:50,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742051_1227 (size=503880) 2024-11-23T17:27:50,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742052_1228 (size=29229) 2024-11-23T17:27:50,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742052_1228 (size=29229) 2024-11-23T17:27:50,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742052_1228 (size=29229) 2024-11-23T17:27:50,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742053_1229 (size=24096) 2024-11-23T17:27:50,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742053_1229 (size=24096) 2024-11-23T17:27:50,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742053_1229 (size=24096) 2024-11-23T17:27:50,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742054_1230 (size=440957) 2024-11-23T17:27:50,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742054_1230 (size=440957) 2024-11-23T17:27:50,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742054_1230 (size=440957) 2024-11-23T17:27:50,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742055_1231 (size=111872) 2024-11-23T17:27:50,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742055_1231 (size=111872) 2024-11-23T17:27:50,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742055_1231 (size=111872) 2024-11-23T17:27:50,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742056_1232 (size=45609) 2024-11-23T17:27:50,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742056_1232 (size=45609) 2024-11-23T17:27:50,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to 
blk_1073742056_1232 (size=45609) 2024-11-23T17:27:50,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742057_1233 (size=136454) 2024-11-23T17:27:50,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742057_1233 (size=136454) 2024-11-23T17:27:50,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742057_1233 (size=136454) 2024-11-23T17:27:50,954 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:27:50,956 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-23T17:27:50,958 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-11-23T17:27:50,958 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-11-23T17:27:50,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742058_1234 (size=447) 2024-11-23T17:27:50,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742058_1234 (size=447) 2024-11-23T17:27:50,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742058_1234 (size=447) 2024-11-23T17:27:50,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742059_1235 (size=21) 2024-11-23T17:27:50,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742059_1235 (size=21) 2024-11-23T17:27:50,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742059_1235 (size=21) 2024-11-23T17:27:50,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742060_1236 (size=304008) 2024-11-23T17:27:50,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742060_1236 (size=304008) 2024-11-23T17:27:50,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742060_1236 (size=304008) 2024-11-23T17:27:51,023 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:27:51,023 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:27:51,033 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0004_000001 (auth:SIMPLE) from 127.0.0.1:46514 2024-11-23T17:27:53,153 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0003/container_1732382632246_0003_01_000001/launch_container.sh] 2024-11-23T17:27:53,154 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0003/container_1732382632246_0003_01_000001/container_tokens] 2024-11-23T17:27:53,154 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0003/container_1732382632246_0003_01_000001/sysfs] 2024-11-23T17:27:55,549 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:27:56,769 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0004_000001 (auth:SIMPLE) from 127.0.0.1:58168 2024-11-23T17:27:57,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742061_1237 (size=349706) 2024-11-23T17:27:57,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742061_1237 (size=349706) 2024-11-23T17:27:57,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742061_1237 (size=349706) 2024-11-23T17:27:59,131 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0004_000001 (auth:SIMPLE) from 127.0.0.1:41292 2024-11-23T17:27:59,133 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0004_000001 (auth:SIMPLE) from 127.0.0.1:52158 2024-11-23T17:28:05,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742062_1238 (size=8324) 2024-11-23T17:28:05,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742062_1238 (size=8324) 2024-11-23T17:28:05,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742062_1238 (size=8324) 2024-11-23T17:28:07,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742064_1240 (size=5288) 2024-11-23T17:28:07,004 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742064_1240 (size=5288) 2024-11-23T17:28:07,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742064_1240 (size=5288) 2024-11-23T17:28:07,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742063_1239 (size=22170) 2024-11-23T17:28:07,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742063_1239 (size=22170) 2024-11-23T17:28:07,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742063_1239 (size=22170) 2024-11-23T17:28:07,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742065_1241 (size=466) 2024-11-23T17:28:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742065_1241 (size=466) 2024-11-23T17:28:07,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742065_1241 (size=466) 2024-11-23T17:28:07,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742066_1242 (size=22170) 2024-11-23T17:28:07,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742066_1242 (size=22170) 2024-11-23T17:28:07,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742066_1242 (size=22170) 2024-11-23T17:28:07,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742067_1243 (size=349706) 2024-11-23T17:28:07,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742067_1243 (size=349706) 2024-11-23T17:28:07,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742067_1243 (size=349706) 2024-11-23T17:28:07,457 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0004_000001 (auth:SIMPLE) from 127.0.0.1:43130 2024-11-23T17:28:07,481 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732382632246_0004_01_000003 is : 143 2024-11-23T17:28:07,496 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0004/container_1732382632246_0004_01_000003/launch_container.sh] 2024-11-23T17:28:07,496 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0004/container_1732382632246_0004_01_000003/container_tokens] 2024-11-23T17:28:07,497 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0004/container_1732382632246_0004_01_000003/sysfs] 2024-11-23T17:28:08,090 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:28:09,235 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:28:09,236 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:28:09,245 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-11-23T17:28:09,245 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:28:09,246 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:28:09,246 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-23T17:28:09,246 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-23T17:28:09,246 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-23T17:28:09,246 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-23T17:28:09,246 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-23T17:28:09,246 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382869345/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-23T17:28:09,252 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-23T17:28:09,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:28:09,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-23T17:28:09,255 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382889255"}]},"ts":"1732382889255"} 2024-11-23T17:28:09,257 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-23T17:28:09,257 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-23T17:28:09,258 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-23T17:28:09,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2696e8b9a02ff803f56da2561e06e84, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e, UNASSIGN}] 2024-11-23T17:28:09,259 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e, UNASSIGN 2024-11-23T17:28:09,260 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2696e8b9a02ff803f56da2561e06e84, UNASSIGN 2024-11-23T17:28:09,260 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=e2696e8b9a02ff803f56da2561e06e84, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:28:09,260 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=d95e3a1e354a49895c35d856b3add21e, regionState=CLOSING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:28:09,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2696e8b9a02ff803f56da2561e06e84, UNASSIGN because future has completed 2024-11-23T17:28:09,262 DEBUG [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:28:09,262 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure e2696e8b9a02ff803f56da2561e06e84, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:28:09,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e, UNASSIGN because future has completed 2024-11-23T17:28:09,263 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:28:09,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure d95e3a1e354a49895c35d856b3add21e, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:28:09,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-23T17:28:09,414 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:28:09,415 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:28:09,415 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing e2696e8b9a02ff803f56da2561e06e84, disabling compactions & flushes 2024-11-23T17:28:09,415 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 2024-11-23T17:28:09,415 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 2024-11-23T17:28:09,415 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. after waiting 0 ms 2024-11-23T17:28:09,415 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 
2024-11-23T17:28:09,420 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:28:09,421 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:28:09,421 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing d95e3a1e354a49895c35d856b3add21e, disabling compactions & flushes 2024-11-23T17:28:09,421 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:28:09,421 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:28:09,421 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. after waiting 0 ms 2024-11-23T17:28:09,421 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:28:09,443 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:28:09,443 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:28:09,444 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:28:09,444 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:28:09,444 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e. 2024-11-23T17:28:09,444 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84. 
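[Editor's note] The disable sequence logged above (DisableTableProcedure pid=107, its CloseTableRegionsProcedure child pid=108, and the per-region TransitRegionStateProcedure/CloseRegionProcedure pairs, followed by each region server acquiring the close lock, disabling compactions and flushes, and writing the close event to the WAL) is what the master runs in response to a client disable request, as seen in the "Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState" entry. A hedged sketch of the client-side call that triggers it; connection settings here are illustrative, not the test's.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // cluster settings are illustrative
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master's DisableTableProcedure (and its CloseRegionProcedure
      // children, as in the log above) has completed.
      admin.disableTable(TableName.valueOf("testtb-testExportFileSystemState"));
    }
  }
}

[End of editor's note]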
2024-11-23T17:28:09,444 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for e2696e8b9a02ff803f56da2561e06e84: Waiting for close lock at 1732382889415Running coprocessor pre-close hooks at 1732382889415Disabling compacts and flushes for region at 1732382889415Disabling writes for close at 1732382889415Writing region close event to WAL at 1732382889418 (+3 ms)Running coprocessor post-close hooks at 1732382889443 (+25 ms)Closed at 1732382889444 (+1 ms) 2024-11-23T17:28:09,444 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for d95e3a1e354a49895c35d856b3add21e: Waiting for close lock at 1732382889421Running coprocessor pre-close hooks at 1732382889421Disabling compacts and flushes for region at 1732382889421Disabling writes for close at 1732382889421Writing region close event to WAL at 1732382889429 (+8 ms)Running coprocessor post-close hooks at 1732382889444 (+15 ms)Closed at 1732382889444 2024-11-23T17:28:09,446 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:28:09,447 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=e2696e8b9a02ff803f56da2561e06e84, regionState=CLOSED 2024-11-23T17:28:09,448 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:28:09,448 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=d95e3a1e354a49895c35d856b3add21e, regionState=CLOSED 2024-11-23T17:28:09,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure e2696e8b9a02ff803f56da2561e06e84, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:28:09,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure d95e3a1e354a49895c35d856b3add21e, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:28:09,457 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=109 2024-11-23T17:28:09,460 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure e2696e8b9a02ff803f56da2561e06e84, server=215bb12e999f,44667,1732382620918 in 192 msec 2024-11-23T17:28:09,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=e2696e8b9a02ff803f56da2561e06e84, UNASSIGN in 198 msec 2024-11-23T17:28:09,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=110 2024-11-23T17:28:09,461 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure d95e3a1e354a49895c35d856b3add21e, server=215bb12e999f,45467,1732382620632 in 193 msec 2024-11-23T17:28:09,464 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=110, resume processing ppid=108 2024-11-23T17:28:09,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=d95e3a1e354a49895c35d856b3add21e, UNASSIGN in 202 msec 2024-11-23T17:28:09,470 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-11-23T17:28:09,470 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 209 msec 2024-11-23T17:28:09,472 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382889472"}]},"ts":"1732382889472"} 2024-11-23T17:28:09,474 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-23T17:28:09,474 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-23T17:28:09,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 222 msec 2024-11-23T17:28:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-23T17:28:09,577 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:28:09,577 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-23T17:28:09,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:28:09,580 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:28:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-23T17:28:09,582 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:28:09,588 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-23T17:28:09,592 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:28:09,593 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:28:09,596 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/recovered.edits] 2024-11-23T17:28:09,596 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/recovered.edits] 2024-11-23T17:28:09,605 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/cf/1aad4ed5ef6d43d7946cffe5f8c7917c to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/cf/1aad4ed5ef6d43d7946cffe5f8c7917c 2024-11-23T17:28:09,605 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/cf/40ff0bbee899478685c99d57ba4c658a to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/cf/40ff0bbee899478685c99d57ba4c658a 2024-11-23T17:28:09,628 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84/recovered.edits/9.seqid 2024-11-23T17:28:09,629 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e/recovered.edits/9.seqid 2024-11-23T17:28:09,629 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/e2696e8b9a02ff803f56da2561e06e84 2024-11-23T17:28:09,630 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemState/d95e3a1e354a49895c35d856b3add21e 2024-11-23T17:28:09,630 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-23T17:28:09,634 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:28:09,638 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-23T17:28:09,645 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-23T17:28:09,646 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:28:09,647 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-11-23T17:28:09,647 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382889647"}]},"ts":"9223372036854775807"} 2024-11-23T17:28:09,647 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382889647"}]},"ts":"9223372036854775807"} 2024-11-23T17:28:09,649 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:28:09,649 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => e2696e8b9a02ff803f56da2561e06e84, NAME => 'testtb-testExportFileSystemState,,1732382865637.e2696e8b9a02ff803f56da2561e06e84.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d95e3a1e354a49895c35d856b3add21e, NAME => 'testtb-testExportFileSystemState,1,1732382865637.d95e3a1e354a49895c35d856b3add21e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:28:09,649 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
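[Editor's note] With the table disabled, DeleteTableProcedure pid=113 archives the region directories through HFileArchiver, removes the region rows and the table state from hbase:meta, and drops the table descriptor, as logged above; the test then also deletes its two snapshots (the "delete name: ..." requests further down). A hedged sketch of the corresponding client calls; the table and snapshot names reuse the ones in the log, everything else is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemState");
      // The table must already be disabled; the master then runs DeleteTableProcedure,
      // which archives the region dirs and clears hbase:meta as shown above.
      admin.deleteTable(table);
      // Snapshot cleanup performed by the test afterwards (see the delete requests below).
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}

[End of editor's note]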
2024-11-23T17:28:09,650 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382889649"}]},"ts":"9223372036854775807"} 2024-11-23T17:28:09,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:28:09,650 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:28:09,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:28:09,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:28:09,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T17:28:09,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T17:28:09,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T17:28:09,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-23T17:28:09,653 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-23T17:28:09,653 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-23T17:28:09,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 75 msec 2024-11-23T17:28:09,661 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:28:09,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:28:09,661 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:28:09,661 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:28:09,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:28:09,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:28:09,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-23T17:28:09,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:28:09,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-11-23T17:28:09,663 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-23T17:28:09,663 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-23T17:28:09,700 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-23T17:28:09,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-23T17:28:09,704 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-23T17:28:09,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-23T17:28:09,735 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=801 (was 792) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:45625 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 109918) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_616964029_1 at /127.0.0.1:54540 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:54568 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36445 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3597 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45625 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_616964029_1 at /127.0.0.1:49282 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:49294 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:40040 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=809 (was 809), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1320 (was 1386), ProcessCount=21 (was 32), AvailableMemoryMB=2430 (was 859) - AvailableMemoryMB LEAK? - 2024-11-23T17:28:09,735 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-11-23T17:28:09,752 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=801, OpenFileDescriptor=809, MaxFileDescriptor=1048576, SystemLoadAverage=1320, ProcessCount=23, AvailableMemoryMB=2429 2024-11-23T17:28:09,752 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-11-23T17:28:09,754 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:28:09,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:28:09,757 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:28:09,757 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:28:09,757 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): 
Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-11-23T17:28:09,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T17:28:09,760 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:28:09,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742068_1244 (size=404) 2024-11-23T17:28:09,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742068_1244 (size=404) 2024-11-23T17:28:09,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742068_1244 (size=404) 2024-11-23T17:28:09,774 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cf70026560edaaccf8b1409145592395, NAME => 'testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:28:09,774 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 16f732981a718b8fb768b4a18e150614, NAME => 'testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:28:09,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742069_1245 (size=65) 2024-11-23T17:28:09,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742069_1245 (size=65) 2024-11-23T17:28:09,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742069_1245 (size=65) 2024-11-23T17:28:09,803 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395.; 
StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:28:09,803 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing cf70026560edaaccf8b1409145592395, disabling compactions & flushes 2024-11-23T17:28:09,803 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:28:09,803 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:28:09,804 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. after waiting 0 ms 2024-11-23T17:28:09,804 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:28:09,804 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:28:09,804 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for cf70026560edaaccf8b1409145592395: Waiting for close lock at 1732382889803Disabling compacts and flushes for region at 1732382889803Disabling writes for close at 1732382889804 (+1 ms)Writing region close event to WAL at 1732382889804Closed at 1732382889804 2024-11-23T17:28:09,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-23T17:28:09,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742070_1246 (size=65) 2024-11-23T17:28:09,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742070_1246 (size=65) 2024-11-23T17:28:09,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742070_1246 (size=65) 2024-11-23T17:28:09,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:28:09,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 16f732981a718b8fb768b4a18e150614, disabling compactions & flushes 2024-11-23T17:28:09,814 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:28:09,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 
2024-11-23T17:28:09,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. after waiting 0 ms 2024-11-23T17:28:09,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:28:09,814 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:28:09,814 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 16f732981a718b8fb768b4a18e150614: Waiting for close lock at 1732382889814Disabling compacts and flushes for region at 1732382889814Disabling writes for close at 1732382889814Writing region close event to WAL at 1732382889814Closed at 1732382889814 2024-11-23T17:28:09,816 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:28:09,816 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732382889816"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382889816"}]},"ts":"1732382889816"} 2024-11-23T17:28:09,816 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732382889816"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382889816"}]},"ts":"1732382889816"} 2024-11-23T17:28:09,820 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
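(The CREATE_TABLE_WRITE_FS_LAYOUT and CREATE_TABLE_ADD_TO_META steps above lay down two regions split at row key '1'. Below is a hedged sketch of a client call that would produce that layout, one 'cf' family with MAX_VERSIONS=1 and a single split key; conn is a placeholder and the snippet is illustrative, not the test's own code.)

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: one column family "cf" and one split key -> two regions with
    // boundaries ('', '1') and ('1', ''), matching the regions added to META above.
    final class CreateTableSketch {
      static void createTable(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .build())
            .build();
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }
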
2024-11-23T17:28:09,821 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:28:09,822 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382889821"}]},"ts":"1732382889821"} 2024-11-23T17:28:09,825 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-23T17:28:09,826 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:28:09,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:28:09,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:28:09,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:28:09,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:28:09,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:28:09,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:28:09,828 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:28:09,828 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:28:09,828 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:28:09,828 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:28:09,829 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=cf70026560edaaccf8b1409145592395, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=16f732981a718b8fb768b4a18e150614, ASSIGN}] 2024-11-23T17:28:09,837 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=cf70026560edaaccf8b1409145592395, ASSIGN 2024-11-23T17:28:09,837 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=16f732981a718b8fb768b4a18e150614, ASSIGN 2024-11-23T17:28:09,841 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=cf70026560edaaccf8b1409145592395, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:28:09,841 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=16f732981a718b8fb768b4a18e150614, ASSIGN; state=OFFLINE, location=215bb12e999f,45467,1732382620632; forceNewPlan=false, retain=false 2024-11-23T17:28:09,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T17:28:09,992 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T17:28:09,993 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=16f732981a718b8fb768b4a18e150614, regionState=OPENING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:28:09,993 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=cf70026560edaaccf8b1409145592395, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:28:09,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=cf70026560edaaccf8b1409145592395, ASSIGN because future has completed 2024-11-23T17:28:10,000 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure cf70026560edaaccf8b1409145592395, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:28:10,001 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=16f732981a718b8fb768b4a18e150614, ASSIGN because future has completed 2024-11-23T17:28:10,001 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16f732981a718b8fb768b4a18e150614, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:28:10,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T17:28:10,158 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:28:10,158 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 16f732981a718b8fb768b4a18e150614, NAME => 'testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:28:10,159 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. service=AccessControlService 2024-11-23T17:28:10,159 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:28:10,159 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,159 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:28:10,159 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,160 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,170 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:28:10,170 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => cf70026560edaaccf8b1409145592395, NAME => 'testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:28:10,171 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. service=AccessControlService 2024-11-23T17:28:10,171 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
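(Each region open above registers the AccessControlService coprocessor endpoint. As a rough sketch of how that coprocessor is typically enabled, the configuration below shows the usual keys; this is an assumption about a generic secure setup, not the exact configuration this test harness builds.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: enabling the AccessController coprocessor (the AccessControlService
    // registered on each region open above) through configuration.
    final class AclConfigSketch {
      static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.security.authorization", true);
        String acl = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.set("hbase.coprocessor.master.classes", acl);
        conf.set("hbase.coprocessor.region.classes", acl);
        conf.set("hbase.coprocessor.regionserver.classes", acl);
        return conf;
      }
    }
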
2024-11-23T17:28:10,171 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,171 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:28:10,171 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,171 INFO [StoreOpener-16f732981a718b8fb768b4a18e150614-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,171 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,174 INFO [StoreOpener-16f732981a718b8fb768b4a18e150614-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 16f732981a718b8fb768b4a18e150614 columnFamilyName cf 2024-11-23T17:28:10,174 DEBUG [StoreOpener-16f732981a718b8fb768b4a18e150614-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:28:10,180 INFO [StoreOpener-16f732981a718b8fb768b4a18e150614-1 {}] regionserver.HStore(327): Store=16f732981a718b8fb768b4a18e150614/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:28:10,180 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,181 INFO [StoreOpener-cf70026560edaaccf8b1409145592395-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,181 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,182 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,183 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,183 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,188 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,191 INFO [StoreOpener-cf70026560edaaccf8b1409145592395-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cf70026560edaaccf8b1409145592395 columnFamilyName cf 2024-11-23T17:28:10,191 DEBUG [StoreOpener-cf70026560edaaccf8b1409145592395-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:28:10,192 INFO [StoreOpener-cf70026560edaaccf8b1409145592395-1 {}] regionserver.HStore(327): Store=cf70026560edaaccf8b1409145592395/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:28:10,193 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,193 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,194 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,194 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,194 DEBUG 
[RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,196 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,199 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:28:10,200 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 16f732981a718b8fb768b4a18e150614; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67832041, jitterRate=0.010776177048683167}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:28:10,200 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,201 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 16f732981a718b8fb768b4a18e150614: Running coprocessor pre-open hook at 1732382890160Writing region info on filesystem at 1732382890160Initializing all the Stores at 1732382890167 (+7 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382890167Cleaning up temporary data from old regions at 1732382890183 (+16 ms)Running coprocessor post-open hooks at 1732382890200 (+17 ms)Region opened successfully at 1732382890201 (+1 ms) 2024-11-23T17:28:10,203 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614., pid=118, masterSystemTime=1732382890154 2024-11-23T17:28:10,209 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:28:10,210 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened cf70026560edaaccf8b1409145592395; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60087431, jitterRate=-0.10462750494480133}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:28:10,210 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open 
hooks for cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,210 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for cf70026560edaaccf8b1409145592395: Running coprocessor pre-open hook at 1732382890172Writing region info on filesystem at 1732382890172Initializing all the Stores at 1732382890173 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382890173Cleaning up temporary data from old regions at 1732382890194 (+21 ms)Running coprocessor post-open hooks at 1732382890210 (+16 ms)Region opened successfully at 1732382890210 2024-11-23T17:28:10,211 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395., pid=117, masterSystemTime=1732382890153 2024-11-23T17:28:10,213 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:28:10,213 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:28:10,214 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:28:10,214 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 
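(Once both regions report "Opened" and the post-open deploy tasks finish, their locations are visible to clients through hbase:meta. A small illustrative sketch of reading them back with a RegionLocator follows; conn is again a placeholder, not something defined in this log.)

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Sketch: list the region boundaries and hosting servers that the assignment
    // procedures above just recorded in hbase:meta.
    final class RegionLocationSketch {
      static void printLocations(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }
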
2024-11-23T17:28:10,217 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=cf70026560edaaccf8b1409145592395, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:28:10,217 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=16f732981a718b8fb768b4a18e150614, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:28:10,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 16f732981a718b8fb768b4a18e150614, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:28:10,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure cf70026560edaaccf8b1409145592395, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:28:10,228 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=115 2024-11-23T17:28:10,236 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=116 2024-11-23T17:28:10,236 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 16f732981a718b8fb768b4a18e150614, server=215bb12e999f,45467,1732382620632 in 223 msec 2024-11-23T17:28:10,240 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=cf70026560edaaccf8b1409145592395, ASSIGN in 401 msec 2024-11-23T17:28:10,240 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure cf70026560edaaccf8b1409145592395, server=215bb12e999f,44667,1732382620918 in 225 msec 2024-11-23T17:28:10,245 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=114 2024-11-23T17:28:10,245 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=16f732981a718b8fb768b4a18e150614, ASSIGN in 408 msec 2024-11-23T17:28:10,250 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:28:10,250 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382890250"}]},"ts":"1732382890250"} 2024-11-23T17:28:10,256 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-23T17:28:10,265 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:28:10,265 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey 
testtb-testConsecutiveExports jenkins: RWXCA 2024-11-23T17:28:10,270 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-23T17:28:10,313 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:28:10,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:28:10,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:28:10,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:28:10,324 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T17:28:10,324 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T17:28:10,324 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T17:28:10,325 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-23T17:28:10,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 571 msec 2024-11-23T17:28:10,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-23T17:28:10,388 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:28:10,388 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-11-23T17:28:10,388 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:28:10,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 
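(The "Writing permission with rowKey ... jenkins: RWXCA" entry and the PBUF updates pushed to each region server's ZKPermissionWatcher correspond to a table-level grant of read, write, exec, create, and admin. The sketch below shows how such a grant could be issued explicitly; in this run the rights appear to come from the table-creation path, so treat this purely as an illustration.)

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    // Sketch: grant RWXCA on the table to user "jenkins"; the ACL is persisted and
    // fanned out to region servers via the /hbase/acl znode, as the watcher events show.
    final class GrantSketch {
      static void grantAll(Connection conn) throws Throwable {
        AccessControlClient.grant(conn, TableName.valueOf("testtb-testConsecutiveExports"),
            "jenkins", null, null,
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }
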
2024-11-23T17:28:10,392 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:28:10,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 2024-11-23T17:28:10,392 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:28:10,396 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T17:28:10,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382890396 (current time:1732382890396). 2024-11-23T17:28:10,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:28:10,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-23T17:28:10,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:28:10,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@418c4334, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:10,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:28:10,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:28:10,398 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:28:10,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:28:10,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:28:10,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1284010d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:10,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:28:10,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientMetaService, sasl=false 2024-11-23T17:28:10,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:10,399 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60024, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:28:10,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7893df39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:10,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:28:10,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:28:10,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:28:10,402 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47264, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:28:10,405 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:28:10,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:28:10,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:10,405 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:28:10,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:10,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66c55c19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:10,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:28:10,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:28:10,410 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:28:10,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:28:10,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:28:10,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b22437b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:10,410 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:28:10,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:28:10,411 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:10,412 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60042, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:28:10,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7afa434c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:10,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:28:10,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:28:10,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:28:10,415 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47270, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:28:10,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:28:10,421 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:28:10,422 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:28:10,423 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:28:10,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:28:10,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:10,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:10,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-23T17:28:10,424 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:28:10,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:28:10,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T17:28:10,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-23T17:28:10,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T17:28:10,427 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:28:10,428 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:28:10,442 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:28:10,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742071_1247 (size=161) 2024-11-23T17:28:10,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742071_1247 (size=161) 2024-11-23T17:28:10,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742071_1247 (size=161) 2024-11-23T17:28:10,509 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ 
ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:28:10,509 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf70026560edaaccf8b1409145592395}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16f732981a718b8fb768b4a18e150614}] 2024-11-23T17:28:10,511 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,511 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T17:28:10,552 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0004/container_1732382632246_0004_01_000002/launch_container.sh] 2024-11-23T17:28:10,552 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0004/container_1732382632246_0004_01_000002/container_tokens] 2024-11-23T17:28:10,552 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0004/container_1732382632246_0004_01_000002/sysfs] 2024-11-23T17:28:10,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-11-23T17:28:10,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-11-23T17:28:10,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:28:10,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 16f732981a718b8fb768b4a18e150614: 2024-11-23T17:28:10,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 
for emptySnaptb0-testConsecutiveExports completed. 2024-11-23T17:28:10,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:28:10,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-23T17:28:10,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:28:10,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for cf70026560edaaccf8b1409145592395: 2024-11-23T17:28:10,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:28:10,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. for emptySnaptb0-testConsecutiveExports completed. 2024-11-23T17:28:10,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-23T17:28:10,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:28:10,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:28:10,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742072_1248 (size=68) 2024-11-23T17:28:10,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742072_1248 (size=68) 2024-11-23T17:28:10,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742072_1248 (size=68) 2024-11-23T17:28:10,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 
2024-11-23T17:28:10,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-23T17:28:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-11-23T17:28:10,734 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,735 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:10,739 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 16f732981a718b8fb768b4a18e150614 in 228 msec 2024-11-23T17:28:10,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742073_1249 (size=68) 2024-11-23T17:28:10,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742073_1249 (size=68) 2024-11-23T17:28:10,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742073_1249 (size=68) 2024-11-23T17:28:10,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T17:28:10,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 
2024-11-23T17:28:10,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-11-23T17:28:10,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-11-23T17:28:10,748 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,749 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf70026560edaaccf8b1409145592395 2024-11-23T17:28:10,767 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=120, resume processing ppid=119 2024-11-23T17:28:10,767 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:28:10,767 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cf70026560edaaccf8b1409145592395 in 243 msec 2024-11-23T17:28:10,768 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:28:10,770 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:28:10,770 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-23T17:28:10,771 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-23T17:28:10,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742074_1250 (size=543) 2024-11-23T17:28:10,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742074_1250 (size=543) 2024-11-23T17:28:10,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742074_1250 (size=543) 2024-11-23T17:28:10,865 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:28:10,877 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:28:10,878 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-23T17:28:10,880 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:28:10,881 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-23T17:28:10,883 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 457 msec 2024-11-23T17:28:11,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-23T17:28:11,057 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:28:11,070 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='030ea9eaa1c5d6782f6ace0738369bb79', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:28:11,071 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='123fc68c4c973d8493e9d16ecdff2d5ef', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:28:11,072 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='2f6d5c24a955c85681a0f094cb01aaffc', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:28:11,077 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='3b63dd4e5cdb4ea6872b7f08d42ebc8cf', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:28:11,085 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:28:11,099 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45467 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:28:11,102 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:28:11,106 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-23T17:28:11,106 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:28:11,108 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:28:11,114 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:28:11,125 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:28:11,135 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-23T17:28:11,141 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T17:28:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382891141 (current time:1732382891141). 
2024-11-23T17:28:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:28:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-23T17:28:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:28:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68ffebb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:28:11,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:28:11,155 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:28:11,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:28:11,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:28:11,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@85f349c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:11,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:28:11,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:28:11,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:11,157 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60058, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:28:11,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1aeba382, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:11,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:28:11,160 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:28:11,161 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:28:11,162 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47284, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:28:11,164 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:28:11,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:28:11,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:11,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:11,165 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:28:11,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ef7ea62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:11,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:28:11,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:28:11,175 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:28:11,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:28:11,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:28:11,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4359aefb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:11,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:28:11,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:28:11,177 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:11,178 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60086, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:28:11,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c96fa18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:28:11,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:28:11,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:28:11,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:28:11,184 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47300, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:28:11,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:28:11,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:28:11,190 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51876, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:28:11,191 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:28:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:28:11,192 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:28:11,192 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:28:11,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-23T17:28:11,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:28:11,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-23T17:28:11,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-23T17:28:11,206 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:28:11,207 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:28:11,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T17:28:11,211 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:28:11,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742075_1251 (size=156) 2024-11-23T17:28:11,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742075_1251 (size=156) 2024-11-23T17:28:11,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742075_1251 (size=156) 2024-11-23T17:28:11,256 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:28:11,256 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf70026560edaaccf8b1409145592395}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16f732981a718b8fb768b4a18e150614}] 2024-11-23T17:28:11,258 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf70026560edaaccf8b1409145592395 2024-11-23T17:28:11,259 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T17:28:11,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-11-23T17:28:11,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:28:11,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-11-23T17:28:11,412 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing cf70026560edaaccf8b1409145592395 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-23T17:28:11,412 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 
2024-11-23T17:28:11,413 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 16f732981a718b8fb768b4a18e150614 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-23T17:28:11,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/.tmp/cf/69b3c627daef4488a1595bbe24fac21e is 71, key is 061be6f9f244fdc7e69b36244e82bc75/cf:q/1732382891084/Put/seqid=0 2024-11-23T17:28:11,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/.tmp/cf/f51ec1c979ce4f99b5650f4007374a17 is 71, key is 120185ae733d0246074afa48b4722332/cf:q/1732382891098/Put/seqid=0 2024-11-23T17:28:11,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T17:28:11,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742076_1252 (size=5288) 2024-11-23T17:28:11,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742076_1252 (size=5288) 2024-11-23T17:28:11,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742076_1252 (size=5288) 2024-11-23T17:28:11,537 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/.tmp/cf/69b3c627daef4488a1595bbe24fac21e 2024-11-23T17:28:11,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/.tmp/cf/69b3c627daef4488a1595bbe24fac21e as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/cf/69b3c627daef4488a1595bbe24fac21e 2024-11-23T17:28:11,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742077_1253 (size=8324) 2024-11-23T17:28:11,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742077_1253 (size=8324) 2024-11-23T17:28:11,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742077_1253 (size=8324) 2024-11-23T17:28:11,575 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/cf/69b3c627daef4488a1595bbe24fac21e, entries=3, sequenceid=6, filesize=5.2 K 2024-11-23T17:28:11,577 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/.tmp/cf/f51ec1c979ce4f99b5650f4007374a17 2024-11-23T17:28:11,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/.tmp/cf/f51ec1c979ce4f99b5650f4007374a17 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/cf/f51ec1c979ce4f99b5650f4007374a17 2024-11-23T17:28:11,588 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for cf70026560edaaccf8b1409145592395 in 176ms, sequenceid=6, compaction requested=false 2024-11-23T17:28:11,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-23T17:28:11,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for cf70026560edaaccf8b1409145592395: 2024-11-23T17:28:11,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. for snaptb0-testConsecutiveExports completed. 2024-11-23T17:28:11,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-23T17:28:11,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:28:11,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/cf/69b3c627daef4488a1595bbe24fac21e] hfiles 2024-11-23T17:28:11,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/cf/69b3c627daef4488a1595bbe24fac21e for snapshot=snaptb0-testConsecutiveExports 2024-11-23T17:28:11,599 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/cf/f51ec1c979ce4f99b5650f4007374a17, entries=47, sequenceid=6, filesize=8.1 K 2024-11-23T17:28:11,606 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 16f732981a718b8fb768b4a18e150614 in 193ms, sequenceid=6, compaction requested=false 2024-11-23T17:28:11,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 16f732981a718b8fb768b4a18e150614: 2024-11-23T17:28:11,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. for snaptb0-testConsecutiveExports completed. 2024-11-23T17:28:11,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-23T17:28:11,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:28:11,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/cf/f51ec1c979ce4f99b5650f4007374a17] hfiles 2024-11-23T17:28:11,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/cf/f51ec1c979ce4f99b5650f4007374a17 for snapshot=snaptb0-testConsecutiveExports 2024-11-23T17:28:11,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742078_1254 (size=107) 2024-11-23T17:28:11,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742078_1254 (size=107) 2024-11-23T17:28:11,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742078_1254 (size=107) 2024-11-23T17:28:11,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 
2024-11-23T17:28:11,641 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-23T17:28:11,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-11-23T17:28:11,642 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region cf70026560edaaccf8b1409145592395 2024-11-23T17:28:11,642 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf70026560edaaccf8b1409145592395 2024-11-23T17:28:11,645 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cf70026560edaaccf8b1409145592395 in 387 msec 2024-11-23T17:28:11,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742079_1255 (size=107) 2024-11-23T17:28:11,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742079_1255 (size=107) 2024-11-23T17:28:11,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742079_1255 (size=107) 2024-11-23T17:28:11,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 
2024-11-23T17:28:11,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-11-23T17:28:11,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-11-23T17:28:11,674 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:11,674 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 16f732981a718b8fb768b4a18e150614 2024-11-23T17:28:11,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-11-23T17:28:11,677 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:28:11,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 16f732981a718b8fb768b4a18e150614 in 419 msec 2024-11-23T17:28:11,678 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:28:11,678 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:28:11,678 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-23T17:28:11,679 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:28:11,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742080_1256 (size=621) 2024-11-23T17:28:11,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742080_1256 (size=621) 2024-11-23T17:28:11,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742080_1256 (size=621) 2024-11-23T17:28:11,714 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:28:11,723 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:28:11,724 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:28:11,727 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:28:11,727 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-23T17:28:11,729 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 524 msec 2024-11-23T17:28:11,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-23T17:28:11,837 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:28:11,837 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837 2024-11-23T17:28:11,837 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:28:11,891 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:28:11,891 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@ff95ab9, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:28:11,896 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:28:11,912 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:28:12,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:12,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:12,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:14,029 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0004_000001 (auth:SIMPLE) from 127.0.0.1:59286 2024-11-23T17:28:14,056 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-1982728629359025040.jar 2024-11-23T17:28:14,057 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:14,058 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:14,062 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0004/container_1732382632246_0004_01_000001/launch_container.sh] 2024-11-23T17:28:14,062 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0004/container_1732382632246_0004_01_000001/container_tokens] 2024-11-23T17:28:14,063 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0004/container_1732382632246_0004_01_000001/sysfs] 2024-11-23T17:28:14,149 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-9666605335339063722.jar 2024-11-23T17:28:14,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:14,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:14,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:14,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:14,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:14,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:14,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:28:14,166 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:28:14,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:28:14,167 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:28:14,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:28:14,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:28:14,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:28:14,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:28:14,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:28:14,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:28:14,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:28:14,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:28:14,170 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:28:14,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-11-23T17:28:14,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:28:14,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:28:14,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:28:14,172 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:28:14,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742081_1257 (size=131440) 2024-11-23T17:28:14,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742081_1257 (size=131440) 2024-11-23T17:28:14,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742081_1257 (size=131440) 2024-11-23T17:28:14,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742082_1258 (size=4188619) 2024-11-23T17:28:14,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742082_1258 (size=4188619) 2024-11-23T17:28:14,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742082_1258 (size=4188619) 2024-11-23T17:28:14,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742083_1259 (size=1323991) 2024-11-23T17:28:14,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742083_1259 (size=1323991) 2024-11-23T17:28:14,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742083_1259 (size=1323991) 2024-11-23T17:28:14,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742084_1260 (size=903733) 2024-11-23T17:28:14,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742084_1260 (size=903733) 2024-11-23T17:28:14,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742084_1260 (size=903733) 2024-11-23T17:28:15,025 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:28:15,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742085_1261 (size=8360083) 2024-11-23T17:28:15,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742085_1261 (size=8360083) 2024-11-23T17:28:15,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742085_1261 (size=8360083) 2024-11-23T17:28:15,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742086_1262 (size=1877034) 2024-11-23T17:28:15,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742086_1262 (size=1877034) 2024-11-23T17:28:15,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742086_1262 (size=1877034) 2024-11-23T17:28:15,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742087_1263 (size=77835) 2024-11-23T17:28:15,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742087_1263 (size=77835) 2024-11-23T17:28:15,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742087_1263 (size=77835) 2024-11-23T17:28:15,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742088_1264 (size=30949) 2024-11-23T17:28:15,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742088_1264 (size=30949) 2024-11-23T17:28:15,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742088_1264 (size=30949) 2024-11-23T17:28:16,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742089_1265 (size=1597347) 2024-11-23T17:28:16,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742089_1265 (size=1597347) 2024-11-23T17:28:16,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742089_1265 (size=1597347) 2024-11-23T17:28:16,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742090_1266 (size=6424742) 2024-11-23T17:28:16,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742090_1266 (size=6424742) 2024-11-23T17:28:16,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742090_1266 (size=6424742) 2024-11-23T17:28:16,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742091_1267 (size=4695811) 2024-11-23T17:28:16,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742091_1267 (size=4695811) 2024-11-23T17:28:16,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742091_1267 (size=4695811) 2024-11-23T17:28:16,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742092_1268 (size=232957) 2024-11-23T17:28:16,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742092_1268 (size=232957) 2024-11-23T17:28:16,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742092_1268 (size=232957) 2024-11-23T17:28:16,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742093_1269 (size=440957) 2024-11-23T17:28:16,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742093_1269 (size=440957) 2024-11-23T17:28:16,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742093_1269 (size=440957) 2024-11-23T17:28:16,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742094_1270 (size=127628) 2024-11-23T17:28:16,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742094_1270 (size=127628) 2024-11-23T17:28:16,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742094_1270 (size=127628) 2024-11-23T17:28:16,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742095_1271 (size=20406) 2024-11-23T17:28:16,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742095_1271 (size=20406) 2024-11-23T17:28:16,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742095_1271 (size=20406) 2024-11-23T17:28:17,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742096_1272 (size=5175431) 2024-11-23T17:28:17,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742096_1272 (size=5175431) 2024-11-23T17:28:17,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742096_1272 (size=5175431) 2024-11-23T17:28:17,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742097_1273 (size=217634) 2024-11-23T17:28:17,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742097_1273 (size=217634) 2024-11-23T17:28:17,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742097_1273 (size=217634) 2024-11-23T17:28:17,322 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742098_1274 (size=1832290) 2024-11-23T17:28:17,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742098_1274 (size=1832290) 2024-11-23T17:28:17,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742098_1274 (size=1832290) 2024-11-23T17:28:17,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742099_1275 (size=322274) 2024-11-23T17:28:17,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742099_1275 (size=322274) 2024-11-23T17:28:17,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742099_1275 (size=322274) 2024-11-23T17:28:17,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742100_1276 (size=503880) 2024-11-23T17:28:17,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742100_1276 (size=503880) 2024-11-23T17:28:17,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742100_1276 (size=503880) 2024-11-23T17:28:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742101_1277 (size=29229) 2024-11-23T17:28:17,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742101_1277 (size=29229) 2024-11-23T17:28:17,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742101_1277 (size=29229) 2024-11-23T17:28:17,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742102_1278 (size=24096) 2024-11-23T17:28:17,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742102_1278 (size=24096) 2024-11-23T17:28:17,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742102_1278 (size=24096) 2024-11-23T17:28:17,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742103_1279 (size=111872) 2024-11-23T17:28:17,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742103_1279 (size=111872) 2024-11-23T17:28:17,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742103_1279 (size=111872) 2024-11-23T17:28:17,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742104_1280 (size=45609) 2024-11-23T17:28:17,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742104_1280 (size=45609) 2024-11-23T17:28:17,950 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742104_1280 (size=45609) 2024-11-23T17:28:17,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742105_1281 (size=136454) 2024-11-23T17:28:17,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742105_1281 (size=136454) 2024-11-23T17:28:17,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742105_1281 (size=136454) 2024-11-23T17:28:17,978 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:28:17,994 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-23T17:28:17,998 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-11-23T17:28:17,998 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-11-23T17:28:18,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742106_1282 (size=441) 2024-11-23T17:28:18,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742106_1282 (size=441) 2024-11-23T17:28:18,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742106_1282 (size=441) 2024-11-23T17:28:18,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742107_1283 (size=21) 2024-11-23T17:28:18,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742107_1283 (size=21) 2024-11-23T17:28:18,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742107_1283 (size=21) 2024-11-23T17:28:18,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742108_1284 (size=304045) 2024-11-23T17:28:18,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742108_1284 (size=304045) 2024-11-23T17:28:18,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742108_1284 (size=304045) 2024-11-23T17:28:18,402 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:28:18,402 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:28:18,495 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0005_000001 (auth:SIMPLE) from 127.0.0.1:49894 2024-11-23T17:28:19,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-23T17:28:19,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-23T17:28:24,843 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0005_000001 (auth:SIMPLE) from 127.0.0.1:36246 2024-11-23T17:28:25,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742109_1285 (size=349743) 2024-11-23T17:28:25,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742109_1285 (size=349743) 2024-11-23T17:28:25,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742109_1285 (size=349743) 2024-11-23T17:28:27,084 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0005_000001 (auth:SIMPLE) from 127.0.0.1:57084 2024-11-23T17:28:27,084 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0005_000001 (auth:SIMPLE) from 127.0.0.1:41470 2024-11-23T17:28:32,142 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0005/container_1732382632246_0005_01_000003/launch_container.sh] 2024-11-23T17:28:32,143 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0005/container_1732382632246_0005_01_000003/container_tokens] 2024-11-23T17:28:32,143 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0005/container_1732382632246_0005_01_000003/sysfs] 2024-11-23T17:28:34,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742110_1286 (size=22235) 2024-11-23T17:28:34,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742110_1286 (size=22235) 2024-11-23T17:28:34,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to 
blk_1073742110_1286 (size=22235) 2024-11-23T17:28:34,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742111_1287 (size=463) 2024-11-23T17:28:34,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742111_1287 (size=463) 2024-11-23T17:28:34,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742111_1287 (size=463) 2024-11-23T17:28:34,119 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_1/usercache/jenkins/appcache/application_1732382632246_0005/container_1732382632246_0005_01_000002/launch_container.sh] 2024-11-23T17:28:34,119 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_1/usercache/jenkins/appcache/application_1732382632246_0005/container_1732382632246_0005_01_000002/container_tokens] 2024-11-23T17:28:34,119 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_1/usercache/jenkins/appcache/application_1732382632246_0005/container_1732382632246_0005_01_000002/sysfs] 2024-11-23T17:28:34,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742112_1288 (size=22235) 2024-11-23T17:28:34,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742112_1288 (size=22235) 2024-11-23T17:28:34,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742112_1288 (size=22235) 2024-11-23T17:28:34,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742113_1289 (size=349743) 2024-11-23T17:28:34,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742113_1289 (size=349743) 2024-11-23T17:28:34,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742113_1289 (size=349743) 2024-11-23T17:28:34,267 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0005_000001 (auth:SIMPLE) from 127.0.0.1:41770 2024-11-23T17:28:35,622 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:28:35,622 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-11-23T17:28:35,657 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-23T17:28:35,657 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:28:35,657 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:28:35,657 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:28:35,662 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T17:28:35,662 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T17:28:35,662 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@ff95ab9 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:28:35,662 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T17:28:35,662 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T17:28:35,664 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:28:35,714 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:28:35,714 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@ff95ab9, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:28:35,716 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:28:35,734 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-23T17:28:35,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:35,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:35,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:36,896 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-13946530096705022438.jar 2024-11-23T17:28:36,896 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:36,896 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:36,958 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-834384060041135186.jar 2024-11-23T17:28:36,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:36,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:36,959 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:36,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:36,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:36,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:28:36,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:28:36,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:28:36,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:28:36,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:28:36,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:28:36,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:28:36,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:28:36,962 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:28:36,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:28:36,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:28:36,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:28:36,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:28:36,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:28:36,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:28:36,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:28:36,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:28:36,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:28:36,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:28:37,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742114_1290 (size=131440) 2024-11-23T17:28:37,024 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742114_1290 (size=131440) 2024-11-23T17:28:37,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742114_1290 (size=131440) 2024-11-23T17:28:37,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742115_1291 (size=4188619) 2024-11-23T17:28:37,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742115_1291 (size=4188619) 2024-11-23T17:28:37,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742115_1291 (size=4188619) 2024-11-23T17:28:37,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742116_1292 (size=1323991) 2024-11-23T17:28:37,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742116_1292 (size=1323991) 2024-11-23T17:28:37,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742116_1292 (size=1323991) 2024-11-23T17:28:37,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742117_1293 (size=903733) 2024-11-23T17:28:37,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742117_1293 (size=903733) 2024-11-23T17:28:37,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742117_1293 (size=903733) 2024-11-23T17:28:37,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742118_1294 (size=8360083) 2024-11-23T17:28:37,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742118_1294 (size=8360083) 2024-11-23T17:28:37,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742118_1294 (size=8360083) 2024-11-23T17:28:37,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742119_1295 (size=1877034) 2024-11-23T17:28:37,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742119_1295 (size=1877034) 2024-11-23T17:28:37,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742119_1295 (size=1877034) 2024-11-23T17:28:37,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742120_1296 (size=77835) 2024-11-23T17:28:37,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742120_1296 (size=77835) 2024-11-23T17:28:37,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742120_1296 (size=77835) 2024-11-23T17:28:37,158 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742121_1297 (size=30949) 2024-11-23T17:28:37,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742121_1297 (size=30949) 2024-11-23T17:28:37,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742121_1297 (size=30949) 2024-11-23T17:28:37,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742122_1298 (size=1597347) 2024-11-23T17:28:37,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742122_1298 (size=1597347) 2024-11-23T17:28:37,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742122_1298 (size=1597347) 2024-11-23T17:28:37,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742123_1299 (size=4695811) 2024-11-23T17:28:37,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742123_1299 (size=4695811) 2024-11-23T17:28:37,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742123_1299 (size=4695811) 2024-11-23T17:28:37,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742124_1300 (size=232957) 2024-11-23T17:28:37,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742124_1300 (size=232957) 2024-11-23T17:28:37,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742124_1300 (size=232957) 2024-11-23T17:28:37,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742125_1301 (size=127628) 2024-11-23T17:28:37,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742125_1301 (size=127628) 2024-11-23T17:28:37,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742125_1301 (size=127628) 2024-11-23T17:28:37,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742126_1302 (size=20406) 2024-11-23T17:28:37,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742126_1302 (size=20406) 2024-11-23T17:28:37,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742126_1302 (size=20406) 2024-11-23T17:28:37,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742127_1303 (size=6424742) 2024-11-23T17:28:37,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742127_1303 (size=6424742) 2024-11-23T17:28:37,268 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742127_1303 (size=6424742) 2024-11-23T17:28:37,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742128_1304 (size=5175431) 2024-11-23T17:28:37,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742128_1304 (size=5175431) 2024-11-23T17:28:37,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742128_1304 (size=5175431) 2024-11-23T17:28:37,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742129_1305 (size=217634) 2024-11-23T17:28:37,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742129_1305 (size=217634) 2024-11-23T17:28:37,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742129_1305 (size=217634) 2024-11-23T17:28:37,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742130_1306 (size=1832290) 2024-11-23T17:28:37,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742130_1306 (size=1832290) 2024-11-23T17:28:37,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742130_1306 (size=1832290) 2024-11-23T17:28:37,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742131_1307 (size=322274) 2024-11-23T17:28:37,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742131_1307 (size=322274) 2024-11-23T17:28:37,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742131_1307 (size=322274) 2024-11-23T17:28:37,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742132_1308 (size=503880) 2024-11-23T17:28:37,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742132_1308 (size=503880) 2024-11-23T17:28:37,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742132_1308 (size=503880) 2024-11-23T17:28:37,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742133_1309 (size=29229) 2024-11-23T17:28:37,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742133_1309 (size=29229) 2024-11-23T17:28:37,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742133_1309 (size=29229) 2024-11-23T17:28:37,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742134_1310 (size=24096) 2024-11-23T17:28:37,425 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742134_1310 (size=24096) 2024-11-23T17:28:37,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742134_1310 (size=24096) 2024-11-23T17:28:37,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742135_1311 (size=111872) 2024-11-23T17:28:37,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742135_1311 (size=111872) 2024-11-23T17:28:37,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742135_1311 (size=111872) 2024-11-23T17:28:37,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742136_1312 (size=440957) 2024-11-23T17:28:37,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742136_1312 (size=440957) 2024-11-23T17:28:37,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742136_1312 (size=440957) 2024-11-23T17:28:37,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742137_1313 (size=45609) 2024-11-23T17:28:37,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742137_1313 (size=45609) 2024-11-23T17:28:37,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742137_1313 (size=45609) 2024-11-23T17:28:37,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742138_1314 (size=136454) 2024-11-23T17:28:37,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742138_1314 (size=136454) 2024-11-23T17:28:37,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742138_1314 (size=136454) 2024-11-23T17:28:37,475 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-23T17:28:37,477 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-23T17:28:37,479 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-11-23T17:28:37,479 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-11-23T17:28:37,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742139_1315 (size=441) 2024-11-23T17:28:37,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742139_1315 (size=441) 2024-11-23T17:28:37,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742139_1315 (size=441) 2024-11-23T17:28:37,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742140_1316 (size=21) 2024-11-23T17:28:37,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742140_1316 (size=21) 2024-11-23T17:28:37,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742140_1316 (size=21) 2024-11-23T17:28:37,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742141_1317 (size=304045) 2024-11-23T17:28:37,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742141_1317 (size=304045) 2024-11-23T17:28:37,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742141_1317 (size=304045) 2024-11-23T17:28:38,091 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:28:40,476 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:28:40,476 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:28:40,497 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0005_000001 (auth:SIMPLE) from 127.0.0.1:50496 2024-11-23T17:28:40,540 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0005/container_1732382632246_0005_01_000001/launch_container.sh] 2024-11-23T17:28:40,540 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0005/container_1732382632246_0005_01_000001/container_tokens] 2024-11-23T17:28:40,541 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0005/container_1732382632246_0005_01_000001/sysfs] 2024-11-23T17:28:40,856 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-23T17:28:40,964 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-11-23T17:28:41,036 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-23T17:28:41,123 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0006_000001 (auth:SIMPLE) from 127.0.0.1:41774 2024-11-23T17:28:41,264 DEBUG [master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=10, reuseRatio=50.00% 2024-11-23T17:28:41,272 DEBUG [master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-23T17:28:43,895 INFO [regionserver/215bb12e999f:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-23T17:28:43,904 INFO [regionserver/215bb12e999f:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 
2024-11-23T17:28:43,955 INFO [regionserver/215bb12e999f:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-23T17:28:45,418 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testConsecutiveExports because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-23T17:28:45,421 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region cf70026560edaaccf8b1409145592395 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:28:45,421 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 16f732981a718b8fb768b4a18e150614 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:28:45,421 DEBUG [master/215bb12e999f:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-11-23T17:28:45,421 INFO [master/215bb12e999f:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-11-23T17:28:45,421 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-11-23T17:28:45,422 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:28:45,422 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 1 regions 2024-11-23T17:28:45,422 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 2 regions 2024-11-23T17:28:45,422 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 1 regions 2024-11-23T17:28:45,423 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:28:45,423 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:28:45,423 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:28:45,423 INFO [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:28:45,423 INFO [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:28:45,423 INFO [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:28:45,423 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=3, number of hosts=1, number of racks=1 2024-11-23T17:28:45,428 INFO [master/215bb12e999f:0.Chore.1 {}] balancer.StochasticLoadBalancer(395): Cluster wide - skipping load balancing because weighted average imbalance=0.014147175573678013 <= threshold(0.025). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 0.025 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8571565880154666, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8603105266290443, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-23T17:28:45,429 DEBUG [master/215bb12e999f:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-11-23T17:28:45,441 DEBUG [master/215bb12e999f:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-23T17:28:45,950 INFO [regionserver/215bb12e999f:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 294495 ms 2024-11-23T17:28:50,523 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:28:52,289 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0006_000001 (auth:SIMPLE) from 127.0.0.1:35132 2024-11-23T17:28:52,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742142_1318 (size=349743) 2024-11-23T17:28:52,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742142_1318 (size=349743) 2024-11-23T17:28:52,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742142_1318 (size=349743) 2024-11-23T17:28:54,541 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0006_000001 (auth:SIMPLE) from 127.0.0.1:51704 2024-11-23T17:28:54,547 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0006_000001 (auth:SIMPLE) from 127.0.0.1:40660 2024-11-23T17:28:55,159 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 16f732981a718b8fb768b4a18e150614, had cached 0 bytes from a total of 8324 2024-11-23T17:28:55,171 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cf70026560edaaccf8b1409145592395, had cached 0 bytes from a total of 5288 2024-11-23T17:28:59,895 INFO [regionserver/215bb12e999f:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 
because 41264850465b123187f03a78b2521b39/l has an old edit so flush to free WALs after random delay 21209 ms 2024-11-23T17:29:01,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742143_1319 (size=21192) 2024-11-23T17:29:01,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742143_1319 (size=21192) 2024-11-23T17:29:01,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742143_1319 (size=21192) 2024-11-23T17:29:01,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742144_1320 (size=463) 2024-11-23T17:29:01,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742144_1320 (size=463) 2024-11-23T17:29:01,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742144_1320 (size=463) 2024-11-23T17:29:01,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742145_1321 (size=21192) 2024-11-23T17:29:01,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742145_1321 (size=21192) 2024-11-23T17:29:01,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742145_1321 (size=21192) 2024-11-23T17:29:01,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742146_1322 (size=349743) 2024-11-23T17:29:01,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742146_1322 (size=349743) 2024-11-23T17:29:01,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742146_1322 (size=349743) 2024-11-23T17:29:01,261 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0006_000001 (auth:SIMPLE) from 127.0.0.1:51714 2024-11-23T17:29:01,286 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732382632246_0006_01_000002 is : 143 2024-11-23T17:29:01,298 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0006/container_1732382632246_0006_01_000002/launch_container.sh] 2024-11-23T17:29:01,298 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0006/container_1732382632246_0006_01_000002/container_tokens] 2024-11-23T17:29:01,298 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned 
false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0006/container_1732382632246_0006_01_000002/sysfs] 2024-11-23T17:29:03,012 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:29:03,012 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:29:03,014 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-23T17:29:03,014 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:29:03,015 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:29:03,015 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:29:03,016 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T17:29:03,016 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T17:29:03,016 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@ff95ab9 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-23T17:29:03,017 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-23T17:29:03,017 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382891837/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-23T17:29:03,037 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-23T17:29:03,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:29:03,039 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-23T17:29:03,040 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382943040"}]},"ts":"1732382943040"} 2024-11-23T17:29:03,042 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-23T17:29:03,042 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-23T17:29:03,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-23T17:29:03,044 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=cf70026560edaaccf8b1409145592395, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=16f732981a718b8fb768b4a18e150614, UNASSIGN}] 2024-11-23T17:29:03,045 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=16f732981a718b8fb768b4a18e150614, UNASSIGN 2024-11-23T17:29:03,045 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=cf70026560edaaccf8b1409145592395, UNASSIGN 2024-11-23T17:29:03,046 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=16f732981a718b8fb768b4a18e150614, regionState=CLOSING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:03,046 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=cf70026560edaaccf8b1409145592395, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:03,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=16f732981a718b8fb768b4a18e150614, UNASSIGN because future has completed 2024-11-23T17:29:03,047 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:03,047 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 16f732981a718b8fb768b4a18e150614, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:29:03,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, 
region=cf70026560edaaccf8b1409145592395, UNASSIGN because future has completed 2024-11-23T17:29:03,048 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:03,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure cf70026560edaaccf8b1409145592395, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:29:03,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-23T17:29:03,200 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 16f732981a718b8fb768b4a18e150614 2024-11-23T17:29:03,200 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close cf70026560edaaccf8b1409145592395 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 16f732981a718b8fb768b4a18e150614, disabling compactions & flushes 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing cf70026560edaaccf8b1409145592395, disabling compactions & flushes 2024-11-23T17:29:03,200 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:29:03,200 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. after waiting 0 ms 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 
after waiting 0 ms 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:29:03,200 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 2024-11-23T17:29:03,205 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:29:03,205 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:29:03,228 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:03,228 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:03,228 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395. 2024-11-23T17:29:03,228 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614. 
2024-11-23T17:29:03,228 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for cf70026560edaaccf8b1409145592395: Waiting for close lock at 1732382943200Running coprocessor pre-close hooks at 1732382943200Disabling compacts and flushes for region at 1732382943200Disabling writes for close at 1732382943200Writing region close event to WAL at 1732382943201 (+1 ms)Running coprocessor post-close hooks at 1732382943228 (+27 ms)Closed at 1732382943228 2024-11-23T17:29:03,228 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 16f732981a718b8fb768b4a18e150614: Waiting for close lock at 1732382943200Running coprocessor pre-close hooks at 1732382943200Disabling compacts and flushes for region at 1732382943200Disabling writes for close at 1732382943200Writing region close event to WAL at 1732382943201 (+1 ms)Running coprocessor post-close hooks at 1732382943228 (+27 ms)Closed at 1732382943228 2024-11-23T17:29:03,230 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 16f732981a718b8fb768b4a18e150614 2024-11-23T17:29:03,230 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=16f732981a718b8fb768b4a18e150614, regionState=CLOSED 2024-11-23T17:29:03,231 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed cf70026560edaaccf8b1409145592395 2024-11-23T17:29:03,231 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=cf70026560edaaccf8b1409145592395, regionState=CLOSED 2024-11-23T17:29:03,232 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 16f732981a718b8fb768b4a18e150614, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:29:03,233 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure cf70026560edaaccf8b1409145592395, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:29:03,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=128 2024-11-23T17:29:03,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 16f732981a718b8fb768b4a18e150614, server=215bb12e999f,45467,1732382620632 in 186 msec 2024-11-23T17:29:03,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=127 2024-11-23T17:29:03,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure cf70026560edaaccf8b1409145592395, server=215bb12e999f,44667,1732382620918 in 186 msec 2024-11-23T17:29:03,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=16f732981a718b8fb768b4a18e150614, UNASSIGN in 191 msec 2024-11-23T17:29:03,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=127, resume processing ppid=126 2024-11-23T17:29:03,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=cf70026560edaaccf8b1409145592395, UNASSIGN in 191 msec 2024-11-23T17:29:03,239 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-11-23T17:29:03,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 194 msec 2024-11-23T17:29:03,241 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382943241"}]},"ts":"1732382943241"} 2024-11-23T17:29:03,243 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-23T17:29:03,243 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-23T17:29:03,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 207 msec 2024-11-23T17:29:03,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-23T17:29:03,356 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:29:03,357 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-11-23T17:29:03,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:29:03,358 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:29:03,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-23T17:29:03,359 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:29:03,361 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-23T17:29:03,363 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395 2024-11-23T17:29:03,363 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614 2024-11-23T17:29:03,364 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/recovered.edits] 2024-11-23T17:29:03,364 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/recovered.edits] 2024-11-23T17:29:03,367 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/cf/69b3c627daef4488a1595bbe24fac21e to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/cf/69b3c627daef4488a1595bbe24fac21e 2024-11-23T17:29:03,367 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/cf/f51ec1c979ce4f99b5650f4007374a17 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/cf/f51ec1c979ce4f99b5650f4007374a17 2024-11-23T17:29:03,500 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614/recovered.edits/9.seqid 2024-11-23T17:29:03,500 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395/recovered.edits/9.seqid 2024-11-23T17:29:03,501 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/cf70026560edaaccf8b1409145592395 2024-11-23T17:29:03,501 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testConsecutiveExports/16f732981a718b8fb768b4a18e150614 
2024-11-23T17:29:03,501 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-23T17:29:03,504 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:29:03,508 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-23T17:29:03,512 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-23T17:29:03,513 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:29:03,513 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-11-23T17:29:03,513 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382943513"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:03,513 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382943513"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:03,515 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:29:03,515 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => cf70026560edaaccf8b1409145592395, NAME => 'testtb-testConsecutiveExports,,1732382889753.cf70026560edaaccf8b1409145592395.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 16f732981a718b8fb768b4a18e150614, NAME => 'testtb-testConsecutiveExports,1,1732382889753.16f732981a718b8fb768b4a18e150614.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:29:03,516 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 
2024-11-23T17:29:03,516 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382943516"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:03,517 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-23T17:29:03,518 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-23T17:29:03,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 161 msec 2024-11-23T17:29:03,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:29:03,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:29:03,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:29:03,573 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:29:03,574 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T17:29:03,574 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T17:29:03,574 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T17:29:03,574 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-23T17:29:03,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:29:03,584 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:29:03,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:29:03,584 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-23T17:29:03,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:03,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:03,584 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:03,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:03,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-23T17:29:03,585 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-23T17:29:03,585 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-23T17:29:03,591 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-23T17:29:03,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-23T17:29:03,594 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-23T17:29:03,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-23T17:29:03,615 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=802 (was 801) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:51864 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 117307) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-145269241_1 at /127.0.0.1:39350 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4931 
java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-145269241_1 at /127.0.0.1:35070 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:45751 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:40290 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45751 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=799 (was 809), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1231 (was 1320), ProcessCount=20 (was 23), AvailableMemoryMB=1945 (was 2429) 2024-11-23T17:29:03,615 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-11-23T17:29:03,633 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=802, OpenFileDescriptor=799, MaxFileDescriptor=1048576, SystemLoadAverage=1231, ProcessCount=20, AvailableMemoryMB=1944 2024-11-23T17:29:03,633 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=802 is superior to 500 2024-11-23T17:29:03,634 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:29:03,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:03,636 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:29:03,636 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:03,636 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-11-23T17:29:03,637 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:29:03,637 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T17:29:03,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742147_1323 (size=422) 2024-11-23T17:29:03,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742147_1323 (size=422) 2024-11-23T17:29:03,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742147_1323 (size=422) 2024-11-23T17:29:03,645 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => efb4f5fc98bc6102f21e07256724c719, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:03,645 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 13b59b0ffd9e01bdd2d43f453a6b2a1a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:03,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742148_1324 (size=83) 2024-11-23T17:29:03,656 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:03,656 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing efb4f5fc98bc6102f21e07256724c719, disabling compactions & flushes 2024-11-23T17:29:03,656 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 
2024-11-23T17:29:03,656 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 2024-11-23T17:29:03,656 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. after waiting 0 ms 2024-11-23T17:29:03,657 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 2024-11-23T17:29:03,657 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 2024-11-23T17:29:03,657 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for efb4f5fc98bc6102f21e07256724c719: Waiting for close lock at 1732382943656Disabling compacts and flushes for region at 1732382943656Disabling writes for close at 1732382943657 (+1 ms)Writing region close event to WAL at 1732382943657Closed at 1732382943657 2024-11-23T17:29:03,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742148_1324 (size=83) 2024-11-23T17:29:03,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742149_1325 (size=83) 2024-11-23T17:29:03,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742148_1324 (size=83) 2024-11-23T17:29:03,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742149_1325 (size=83) 2024-11-23T17:29:03,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742149_1325 (size=83) 2024-11-23T17:29:03,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T17:29:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T17:29:04,057 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:04,057 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 13b59b0ffd9e01bdd2d43f453a6b2a1a, disabling compactions & flushes 2024-11-23T17:29:04,057 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 
2024-11-23T17:29:04,057 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:04,057 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. after waiting 0 ms 2024-11-23T17:29:04,057 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:04,057 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:04,057 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 13b59b0ffd9e01bdd2d43f453a6b2a1a: Waiting for close lock at 1732382944057Disabling compacts and flushes for region at 1732382944057Disabling writes for close at 1732382944057Writing region close event to WAL at 1732382944057Closed at 1732382944057 2024-11-23T17:29:04,058 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:29:04,059 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732382944058"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382944058"}]},"ts":"1732382944058"} 2024-11-23T17:29:04,059 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732382944058"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382944058"}]},"ts":"1732382944058"} 2024-11-23T17:29:04,061 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-23T17:29:04,062 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:29:04,062 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382944062"}]},"ts":"1732382944062"} 2024-11-23T17:29:04,063 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-23T17:29:04,064 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:29:04,065 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:29:04,065 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:29:04,065 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:29:04,065 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:29:04,065 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:29:04,065 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:29:04,065 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:29:04,065 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:29:04,065 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:29:04,065 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:29:04,065 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=13b59b0ffd9e01bdd2d43f453a6b2a1a, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=efb4f5fc98bc6102f21e07256724c719, ASSIGN}] 2024-11-23T17:29:04,066 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=efb4f5fc98bc6102f21e07256724c719, ASSIGN 2024-11-23T17:29:04,066 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=13b59b0ffd9e01bdd2d43f453a6b2a1a, ASSIGN 2024-11-23T17:29:04,067 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=13b59b0ffd9e01bdd2d43f453a6b2a1a, ASSIGN; state=OFFLINE, location=215bb12e999f,45467,1732382620632; forceNewPlan=false, retain=false 
2024-11-23T17:29:04,067 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=efb4f5fc98bc6102f21e07256724c719, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:29:04,217 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T17:29:04,218 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=efb4f5fc98bc6102f21e07256724c719, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:04,218 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=13b59b0ffd9e01bdd2d43f453a6b2a1a, regionState=OPENING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:04,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=13b59b0ffd9e01bdd2d43f453a6b2a1a, ASSIGN because future has completed 2024-11-23T17:29:04,221 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:29:04,221 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=efb4f5fc98bc6102f21e07256724c719, ASSIGN because future has completed 2024-11-23T17:29:04,221 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure efb4f5fc98bc6102f21e07256724c719, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:29:04,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T17:29:04,375 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:04,375 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 
2024-11-23T17:29:04,375 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 13b59b0ffd9e01bdd2d43f453a6b2a1a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:29:04,375 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => efb4f5fc98bc6102f21e07256724c719, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. service=AccessControlService 2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. service=AccessControlService 2024-11-23T17:29:04,376 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:29:04,376 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,376 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,377 INFO [StoreOpener-efb4f5fc98bc6102f21e07256724c719-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,377 INFO [StoreOpener-13b59b0ffd9e01bdd2d43f453a6b2a1a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,379 INFO [StoreOpener-13b59b0ffd9e01bdd2d43f453a6b2a1a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13b59b0ffd9e01bdd2d43f453a6b2a1a columnFamilyName cf 2024-11-23T17:29:04,379 INFO [StoreOpener-efb4f5fc98bc6102f21e07256724c719-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region efb4f5fc98bc6102f21e07256724c719 columnFamilyName cf 2024-11-23T17:29:04,379 DEBUG [StoreOpener-13b59b0ffd9e01bdd2d43f453a6b2a1a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:04,379 DEBUG [StoreOpener-efb4f5fc98bc6102f21e07256724c719-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:04,379 INFO [StoreOpener-efb4f5fc98bc6102f21e07256724c719-1 {}] regionserver.HStore(327): Store=efb4f5fc98bc6102f21e07256724c719/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:04,379 INFO [StoreOpener-13b59b0ffd9e01bdd2d43f453a6b2a1a-1 {}] regionserver.HStore(327): Store=13b59b0ffd9e01bdd2d43f453a6b2a1a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:04,379 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,379 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,380 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,380 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,380 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,380 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 
{event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,380 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,380 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,381 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,381 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,382 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,382 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,383 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:04,383 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:04,384 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 13b59b0ffd9e01bdd2d43f453a6b2a1a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71160924, jitterRate=0.060380399227142334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:04,384 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened efb4f5fc98bc6102f21e07256724c719; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63533609, jitterRate=-0.0532754510641098}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:04,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 13b59b0ffd9e01bdd2d43f453a6b2a1a 
2024-11-23T17:29:04,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 13b59b0ffd9e01bdd2d43f453a6b2a1a: Running coprocessor pre-open hook at 1732382944376Writing region info on filesystem at 1732382944376Initializing all the Stores at 1732382944377 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382944377Cleaning up temporary data from old regions at 1732382944381 (+4 ms)Running coprocessor post-open hooks at 1732382944384 (+3 ms)Region opened successfully at 1732382944384 2024-11-23T17:29:04,384 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for efb4f5fc98bc6102f21e07256724c719: Running coprocessor pre-open hook at 1732382944376Writing region info on filesystem at 1732382944376Initializing all the Stores at 1732382944377 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382944377Cleaning up temporary data from old regions at 1732382944381 (+4 ms)Running coprocessor post-open hooks at 1732382944384 (+3 ms)Region opened successfully at 1732382944384 2024-11-23T17:29:04,385 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a., pid=135, masterSystemTime=1732382944372 2024-11-23T17:29:04,385 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719., pid=136, masterSystemTime=1732382944373 2024-11-23T17:29:04,386 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:04,386 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:04,387 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=13b59b0ffd9e01bdd2d43f453a6b2a1a, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:04,387 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 
2024-11-23T17:29:04,387 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 2024-11-23T17:29:04,387 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=efb4f5fc98bc6102f21e07256724c719, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:04,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:29:04,389 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure efb4f5fc98bc6102f21e07256724c719, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:29:04,390 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-11-23T17:29:04,391 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a, server=215bb12e999f,45467,1732382620632 in 169 msec 2024-11-23T17:29:04,391 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=134 2024-11-23T17:29:04,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure efb4f5fc98bc6102f21e07256724c719, server=215bb12e999f,44667,1732382620918 in 169 msec 2024-11-23T17:29:04,392 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=13b59b0ffd9e01bdd2d43f453a6b2a1a, ASSIGN in 326 msec 2024-11-23T17:29:04,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-11-23T17:29:04,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=efb4f5fc98bc6102f21e07256724c719, ASSIGN in 327 msec 2024-11-23T17:29:04,393 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:29:04,394 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382944393"}]},"ts":"1732382944393"} 2024-11-23T17:29:04,395 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-23T17:29:04,396 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute 
state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:29:04,396 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-23T17:29:04,399 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-23T17:29:04,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:04,447 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:04,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:04,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:04,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:04,458 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:04,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:04,459 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:04,460 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 824 msec 2024-11-23T17:29:04,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-23T17:29:04,776 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:29:04,776 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. 
Timeout = 60000ms 2024-11-23T17:29:04,776 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-11-23T17:29:04,781 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:04,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-11-23T17:29:04,781 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:29:04,784 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T17:29:04,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382944784 (current time:1732382944784). 2024-11-23T17:29:04,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:29:04,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-23T17:29:04,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:04,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@573968a0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:04,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:04,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:04,787 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:04,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:04,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:04,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d8b92f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:04,787 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:04,788 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:04,788 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:04,789 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58452, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:04,789 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37405930, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:04,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:04,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:04,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:04,792 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:04,794 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:04,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:04,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:04,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:04,794 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:04,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ebcaa11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:04,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:04,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:04,796 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:04,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:04,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:04,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5a138ca0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:04,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:04,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:04,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:04,798 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58468, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:04,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f6b6aea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:04,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:04,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:04,801 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:04,802 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59358, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:04,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:04,804 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:04,806 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57182, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:04,808 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:04,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:04,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:04,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:04,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-23T17:29:04,809 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:04,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:29:04,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T17:29:04,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-23T17:29:04,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-23T17:29:04,812 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:04,814 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:04,817 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:04,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742150_1326 (size=215) 2024-11-23T17:29:04,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742150_1326 (size=215) 2024-11-23T17:29:04,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742150_1326 (size=215) 2024-11-23T17:29:04,829 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:04,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efb4f5fc98bc6102f21e07256724c719}] 2024-11-23T17:29:04,831 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:04,831 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:04,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-23T17:29:04,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-23T17:29:04,988 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-11-23T17:29:04,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:04,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 2024-11-23T17:29:04,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 13b59b0ffd9e01bdd2d43f453a6b2a1a: 2024-11-23T17:29:04,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T17:29:04,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:04,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:04,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:29:04,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for efb4f5fc98bc6102f21e07256724c719: 2024-11-23T17:29:04,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-23T17:29:04,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:04,989 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:04,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:29:05,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742151_1327 (size=86) 2024-11-23T17:29:05,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742151_1327 (size=86) 2024-11-23T17:29:05,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742151_1327 (size=86) 2024-11-23T17:29:05,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:05,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-23T17:29:05,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-23T17:29:05,027 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:05,027 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:05,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742152_1328 (size=86) 2024-11-23T17:29:05,030 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a in 199 msec 2024-11-23T17:29:05,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742152_1328 (size=86) 2024-11-23T17:29:05,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742152_1328 (size=86) 2024-11-23T17:29:05,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 
2024-11-23T17:29:05,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-23T17:29:05,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-11-23T17:29:05,031 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:05,031 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:05,035 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-11-23T17:29:05,035 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:05,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure efb4f5fc98bc6102f21e07256724c719 in 203 msec 2024-11-23T17:29:05,036 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:05,037 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:05,037 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,038 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,055 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_3/usercache/jenkins/appcache/application_1732382632246_0006/container_1732382632246_0006_01_000003/launch_container.sh] 2024-11-23T17:29:05,056 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_3/usercache/jenkins/appcache/application_1732382632246_0006/container_1732382632246_0006_01_000003/container_tokens] 2024-11-23T17:29:05,056 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_3/usercache/jenkins/appcache/application_1732382632246_0006/container_1732382632246_0006_01_000003/sysfs] 2024-11-23T17:29:05,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742153_1329 (size=597) 2024-11-23T17:29:05,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742153_1329 (size=597) 2024-11-23T17:29:05,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742153_1329 (size=597) 2024-11-23T17:29:05,082 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:05,086 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:05,087 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,088 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:05,088 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-23T17:29:05,090 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 279 msec 2024-11-23T17:29:05,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-23T17:29:05,127 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:29:05,131 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='09bb9f2ea95b68ae04037564c9a0d0320', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:05,131 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='10ced84720a384a05c461912628e90650', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:05,134 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='3766c4263ff6bd15a8fe26f11811d47cf', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:05,135 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='29b3a52de7fe629fe15ad2a0b60fef989', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:05,136 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='40fa6d10457bcf63d91cb7ac187ebb1f0', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:05,138 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45467 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:05,140 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:05,142 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:29:05,145 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,145 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 
2024-11-23T17:29:05,146 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:05,148 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:29:05,154 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:29:05,161 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-23T17:29:05,164 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T17:29:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382945164 (current time:1732382945164). 2024-11-23T17:29:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:29:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-23T17:29:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:05,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7547c61a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:05,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:05,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:05,166 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:05,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:05,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:05,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59108cf9, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:05,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:05,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:05,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:05,167 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58478, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:05,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@374aad07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:05,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:05,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:05,169 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:05,169 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59370, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:05,171 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:05,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:05,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:05,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:05,171 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:05,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59a94f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:05,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:05,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:05,172 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:05,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:05,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:05,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56cb489c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:05,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:05,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:05,173 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:05,174 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58494, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:05,174 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@770e7af9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:05,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:05,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:05,176 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:05,177 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59380, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:05,178 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:05,178 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:05,179 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57190, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:05,181 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:05,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:05,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:05,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:05,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-23T17:29:05,181 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:05,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:29:05,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-23T17:29:05,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-23T17:29:05,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T17:29:05,187 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:05,188 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:05,191 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:05,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742154_1330 (size=210) 2024-11-23T17:29:05,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742154_1330 (size=210) 2024-11-23T17:29:05,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742154_1330 (size=210) 2024-11-23T17:29:05,215 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:05,215 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efb4f5fc98bc6102f21e07256724c719}] 2024-11-23T17:29:05,216 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:05,216 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:05,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T17:29:05,369 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-23T17:29:05,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-11-23T17:29:05,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 2024-11-23T17:29:05,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:05,370 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 13b59b0ffd9e01bdd2d43f453a6b2a1a 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-23T17:29:05,370 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing efb4f5fc98bc6102f21e07256724c719 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-23T17:29:05,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/.tmp/cf/cdd3ccc9b60a40179233fa9503d9c8d2 is 71, key is 10eac3307a636eda6b3c9e4c1e72f221/cf:q/1732382945140/Put/seqid=0 2024-11-23T17:29:05,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/.tmp/cf/4ac9821d005249adbbfabd0ed228e94d is 71, key is 0a57b0e8b05533a3a85c1a5f4f210c52/cf:q/1732382945138/Put/seqid=0 2024-11-23T17:29:05,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742155_1331 (size=5356) 2024-11-23T17:29:05,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742155_1331 (size=5356) 2024-11-23T17:29:05,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742155_1331 (size=5356) 2024-11-23T17:29:05,428 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore 
data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/.tmp/cf/4ac9821d005249adbbfabd0ed228e94d 2024-11-23T17:29:05,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742156_1332 (size=8258) 2024-11-23T17:29:05,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742156_1332 (size=8258) 2024-11-23T17:29:05,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742156_1332 (size=8258) 2024-11-23T17:29:05,438 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/.tmp/cf/4ac9821d005249adbbfabd0ed228e94d as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/cf/4ac9821d005249adbbfabd0ed228e94d 2024-11-23T17:29:05,438 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/.tmp/cf/cdd3ccc9b60a40179233fa9503d9c8d2 2024-11-23T17:29:05,446 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/cf/4ac9821d005249adbbfabd0ed228e94d, entries=4, sequenceid=6, filesize=5.2 K 2024-11-23T17:29:05,447 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 13b59b0ffd9e01bdd2d43f453a6b2a1a in 76ms, sequenceid=6, compaction requested=false 2024-11-23T17:29:05,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-23T17:29:05,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 13b59b0ffd9e01bdd2d43f453a6b2a1a: 2024-11-23T17:29:05,447 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-11-23T17:29:05,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:05,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/cf/4ac9821d005249adbbfabd0ed228e94d] hfiles 2024-11-23T17:29:05,448 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/cf/4ac9821d005249adbbfabd0ed228e94d for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/.tmp/cf/cdd3ccc9b60a40179233fa9503d9c8d2 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/cf/cdd3ccc9b60a40179233fa9503d9c8d2 2024-11-23T17:29:05,462 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/cf/cdd3ccc9b60a40179233fa9503d9c8d2, entries=46, sequenceid=6, filesize=8.1 K 2024-11-23T17:29:05,463 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for efb4f5fc98bc6102f21e07256724c719 in 93ms, sequenceid=6, compaction requested=false 2024-11-23T17:29:05,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for efb4f5fc98bc6102f21e07256724c719: 2024-11-23T17:29:05,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-11-23T17:29:05,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:05,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/cf/cdd3ccc9b60a40179233fa9503d9c8d2] hfiles 2024-11-23T17:29:05,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/cf/cdd3ccc9b60a40179233fa9503d9c8d2 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T17:29:05,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742157_1333 (size=125) 2024-11-23T17:29:05,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742157_1333 (size=125) 2024-11-23T17:29:05,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742157_1333 (size=125) 2024-11-23T17:29:05,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 
2024-11-23T17:29:05,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-23T17:29:05,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-23T17:29:05,533 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:05,533 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:05,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a in 319 msec 2024-11-23T17:29:05,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742158_1334 (size=125) 2024-11-23T17:29:05,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742158_1334 (size=125) 2024-11-23T17:29:05,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742158_1334 (size=125) 2024-11-23T17:29:05,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 
2024-11-23T17:29:05,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-11-23T17:29:05,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-11-23T17:29:05,564 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:05,564 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:05,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-11-23T17:29:05,567 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure efb4f5fc98bc6102f21e07256724c719 in 350 msec 2024-11-23T17:29:05,567 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:05,568 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:05,569 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:05,569 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,570 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742159_1335 (size=675) 2024-11-23T17:29:05,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742159_1335 (size=675) 2024-11-23T17:29:05,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742159_1335 (size=675) 2024-11-23T17:29:05,653 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:05,665 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:05,666 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:05,668 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:05,668 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-23T17:29:05,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 486 msec 2024-11-23T17:29:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-23T17:29:05,807 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:29:05,810 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:29:05,820 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:29:05,822 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57198, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:29:05,824 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-23T17:29:05,825 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59382, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:29:05,826 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-23T17:29:05,834 INFO 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:29:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:05,838 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:29:05,838 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:05,839 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-11-23T17:29:05,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T17:29:05,843 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:29:05,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742160_1336 (size=399) 2024-11-23T17:29:05,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742160_1336 (size=399) 2024-11-23T17:29:05,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742160_1336 (size=399) 2024-11-23T17:29:05,877 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 577159f9aec0098b47a0f1bb1df4440f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:05,878 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 
bb1819f22a90703b77321b72be17ace1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:05,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742162_1338 (size=85) 2024-11-23T17:29:05,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742162_1338 (size=85) 2024-11-23T17:29:05,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742162_1338 (size=85) 2024-11-23T17:29:05,945 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:05,946 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing bb1819f22a90703b77321b72be17ace1, disabling compactions & flushes 2024-11-23T17:29:05,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T17:29:05,946 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 2024-11-23T17:29:05,946 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 2024-11-23T17:29:05,946 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. after waiting 0 ms 2024-11-23T17:29:05,946 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 2024-11-23T17:29:05,946 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 
2024-11-23T17:29:05,946 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for bb1819f22a90703b77321b72be17ace1: Waiting for close lock at 1732382945945Disabling compacts and flushes for region at 1732382945945Disabling writes for close at 1732382945946 (+1 ms)Writing region close event to WAL at 1732382945946Closed at 1732382945946 2024-11-23T17:29:05,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742161_1337 (size=85) 2024-11-23T17:29:05,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742161_1337 (size=85) 2024-11-23T17:29:05,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742161_1337 (size=85) 2024-11-23T17:29:05,959 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:05,959 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 577159f9aec0098b47a0f1bb1df4440f, disabling compactions & flushes 2024-11-23T17:29:05,959 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 2024-11-23T17:29:05,959 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 2024-11-23T17:29:05,960 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. after waiting 0 ms 2024-11-23T17:29:05,960 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 2024-11-23T17:29:05,960 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 
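The two regions just written to the filesystem (577159f9aec0098b47a0f1bb1df4440f with STARTKEY '' / ENDKEY '2', and bb1819f22a90703b77321b72be17ace1 with STARTKEY '2' / ENDKEY '') correspond to the create-table request logged at 17:29:05,834. A minimal sketch of an equivalent client-side request, assuming the standard HBase TableDescriptor/Admin API; only the table name, the 'cf' family and the single split key '2' come from the log, everything else is left at library defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSplitTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
          // single family 'cf' with defaults (VERSIONS=1, BLOOMFILTER=ROW, 64KB block size)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One split key '2' pre-splits the table into the two regions seen above:
      // ['', '2') and ['2', '').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("2") });
    }
  }
}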
2024-11-23T17:29:05,960 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 577159f9aec0098b47a0f1bb1df4440f: Waiting for close lock at 1732382945959Disabling compacts and flushes for region at 1732382945959Disabling writes for close at 1732382945960 (+1 ms)Writing region close event to WAL at 1732382945960Closed at 1732382945960 2024-11-23T17:29:05,961 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:29:05,961 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732382945961"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382945961"}]},"ts":"1732382945961"} 2024-11-23T17:29:05,961 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732382945961"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382945961"}]},"ts":"1732382945961"} 2024-11-23T17:29:05,964 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:29:05,965 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:29:05,965 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382945965"}]},"ts":"1732382945965"} 2024-11-23T17:29:05,968 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-23T17:29:05,968 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:29:05,969 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:29:05,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:29:05,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:29:05,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:29:05,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:29:05,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:29:05,970 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:29:05,970 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:29:05,970 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:29:05,970 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:29:05,970 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=577159f9aec0098b47a0f1bb1df4440f, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb1819f22a90703b77321b72be17ace1, ASSIGN}] 2024-11-23T17:29:05,971 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb1819f22a90703b77321b72be17ace1, ASSIGN 2024-11-23T17:29:05,972 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=577159f9aec0098b47a0f1bb1df4440f, ASSIGN 2024-11-23T17:29:05,973 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=577159f9aec0098b47a0f1bb1df4440f, ASSIGN; state=OFFLINE, location=215bb12e999f,41065,1732382621000; forceNewPlan=false, retain=false 2024-11-23T17:29:05,973 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb1819f22a90703b77321b72be17ace1, ASSIGN; state=OFFLINE, location=215bb12e999f,45467,1732382620632; forceNewPlan=false, retain=false 2024-11-23T17:29:06,123 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:29:06,124 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=577159f9aec0098b47a0f1bb1df4440f, regionState=OPENING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:29:06,124 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=bb1819f22a90703b77321b72be17ace1, regionState=OPENING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:06,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb1819f22a90703b77321b72be17ace1, ASSIGN because future has completed 2024-11-23T17:29:06,127 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb1819f22a90703b77321b72be17ace1, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:29:06,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=577159f9aec0098b47a0f1bb1df4440f, ASSIGN because future has completed 2024-11-23T17:29:06,129 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 577159f9aec0098b47a0f1bb1df4440f, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:29:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T17:29:06,282 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 2024-11-23T17:29:06,283 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => bb1819f22a90703b77321b72be17ace1, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1.', STARTKEY => '2', ENDKEY => ''} 2024-11-23T17:29:06,283 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. service=AccessControlService 2024-11-23T17:29:06,283 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:29:06,283 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,283 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:06,284 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,284 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,289 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 2024-11-23T17:29:06,289 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 577159f9aec0098b47a0f1bb1df4440f, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f.', STARTKEY => '', ENDKEY => '2'} 2024-11-23T17:29:06,289 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. service=AccessControlService 2024-11-23T17:29:06,289 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:29:06,289 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,289 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:06,290 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,290 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,290 INFO [StoreOpener-bb1819f22a90703b77321b72be17ace1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,292 INFO [StoreOpener-bb1819f22a90703b77321b72be17ace1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb1819f22a90703b77321b72be17ace1 columnFamilyName cf 2024-11-23T17:29:06,292 DEBUG [StoreOpener-bb1819f22a90703b77321b72be17ace1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:06,293 INFO [StoreOpener-bb1819f22a90703b77321b72be17ace1-1 {}] regionserver.HStore(327): Store=bb1819f22a90703b77321b72be17ace1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:06,293 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,293 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,294 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,294 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,294 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,295 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,308 INFO [StoreOpener-577159f9aec0098b47a0f1bb1df4440f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,310 INFO [StoreOpener-577159f9aec0098b47a0f1bb1df4440f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 577159f9aec0098b47a0f1bb1df4440f columnFamilyName cf 2024-11-23T17:29:06,310 DEBUG [StoreOpener-577159f9aec0098b47a0f1bb1df4440f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:06,311 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:06,311 INFO [StoreOpener-577159f9aec0098b47a0f1bb1df4440f-1 {}] regionserver.HStore(327): Store=577159f9aec0098b47a0f1bb1df4440f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:06,311 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,312 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,312 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): 
Opened bb1819f22a90703b77321b72be17ace1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72567557, jitterRate=0.08134086430072784}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:06,312 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:06,313 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for bb1819f22a90703b77321b72be17ace1: Running coprocessor pre-open hook at 1732382946284Writing region info on filesystem at 1732382946284Initializing all the Stores at 1732382946288 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382946288Cleaning up temporary data from old regions at 1732382946294 (+6 ms)Running coprocessor post-open hooks at 1732382946312 (+18 ms)Region opened successfully at 1732382946313 (+1 ms) 2024-11-23T17:29:06,314 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,316 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1., pid=146, masterSystemTime=1732382946279 2024-11-23T17:29:06,316 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,316 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,319 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=bb1819f22a90703b77321b72be17ace1, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:06,321 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,322 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 2024-11-23T17:29:06,322 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 
2024-11-23T17:29:06,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure bb1819f22a90703b77321b72be17ace1, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:29:06,330 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=145 2024-11-23T17:29:06,330 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure bb1819f22a90703b77321b72be17ace1, server=215bb12e999f,45467,1732382620632 in 199 msec 2024-11-23T17:29:06,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb1819f22a90703b77321b72be17ace1, ASSIGN in 360 msec 2024-11-23T17:29:06,343 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:06,343 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 577159f9aec0098b47a0f1bb1df4440f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67442988, jitterRate=0.004978835582733154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:06,344 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:06,344 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 577159f9aec0098b47a0f1bb1df4440f: Running coprocessor pre-open hook at 1732382946290Writing region info on filesystem at 1732382946290Initializing all the Stores at 1732382946291 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382946291Cleaning up temporary data from old regions at 1732382946316 (+25 ms)Running coprocessor post-open hooks at 1732382946344 (+28 ms)Region opened successfully at 1732382946344 2024-11-23T17:29:06,345 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f., pid=147, masterSystemTime=1732382946282 2024-11-23T17:29:06,348 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 
2024-11-23T17:29:06,348 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 2024-11-23T17:29:06,348 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=577159f9aec0098b47a0f1bb1df4440f, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:29:06,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 577159f9aec0098b47a0f1bb1df4440f, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:29:06,355 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=144 2024-11-23T17:29:06,355 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 577159f9aec0098b47a0f1bb1df4440f, server=215bb12e999f,41065,1732382621000 in 224 msec 2024-11-23T17:29:06,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=144, resume processing ppid=143 2024-11-23T17:29:06,360 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=577159f9aec0098b47a0f1bb1df4440f, ASSIGN in 385 msec 2024-11-23T17:29:06,361 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:29:06,361 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382946361"}]},"ts":"1732382946361"} 2024-11-23T17:29:06,364 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-23T17:29:06,365 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:29:06,365 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-23T17:29:06,370 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-23T17:29:06,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T17:29:06,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:06,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, 
quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:06,658 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:06,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:06,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:06,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:06,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:06,669 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:06,670 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:06,671 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:06,671 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:06,671 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:06,672 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 835 msec 2024-11-23T17:29:06,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-23T17:29:06,976 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:29:06,979 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:06,987 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:06,998 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-23T17:29:07,015 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [577159f9aec0098b47a0f1bb1df4440f, bb1819f22a90703b77321b72be17ace1] 2024-11-23T17:29:07,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[577159f9aec0098b47a0f1bb1df4440f, bb1819f22a90703b77321b72be17ace1], force=true 2024-11-23T17:29:07,022 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[577159f9aec0098b47a0f1bb1df4440f, bb1819f22a90703b77321b72be17ace1], force=true 2024-11-23T17:29:07,022 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[577159f9aec0098b47a0f1bb1df4440f, bb1819f22a90703b77321b72be17ace1], force=true 2024-11-23T17:29:07,022 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[577159f9aec0098b47a0f1bb1df4440f, bb1819f22a90703b77321b72be17ace1], force=true 2024-11-23T17:29:07,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:29:07,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=577159f9aec0098b47a0f1bb1df4440f, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb1819f22a90703b77321b72be17ace1, UNASSIGN}] 2024-11-23T17:29:07,037 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb1819f22a90703b77321b72be17ace1, UNASSIGN 2024-11-23T17:29:07,038 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=577159f9aec0098b47a0f1bb1df4440f, UNASSIGN 2024-11-23T17:29:07,039 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=bb1819f22a90703b77321b72be17ace1, regionState=CLOSING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:07,039 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=577159f9aec0098b47a0f1bb1df4440f, regionState=CLOSING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:29:07,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb1819f22a90703b77321b72be17ace1, UNASSIGN because future has completed 2024-11-23T17:29:07,043 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:07,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure bb1819f22a90703b77321b72be17ace1, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:29:07,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=577159f9aec0098b47a0f1bb1df4440f, UNASSIGN because future has completed 2024-11-23T17:29:07,046 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:07,046 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 577159f9aec0098b47a0f1bb1df4440f, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:29:07,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:29:07,198 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:07,198 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T17:29:07,198 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 
bb1819f22a90703b77321b72be17ace1, disabling compactions & flushes 2024-11-23T17:29:07,199 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 2024-11-23T17:29:07,199 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 2024-11-23T17:29:07,199 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. after waiting 0 ms 2024-11-23T17:29:07,199 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 2024-11-23T17:29:07,199 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing bb1819f22a90703b77321b72be17ace1 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-23T17:29:07,199 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:07,199 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-23T17:29:07,199 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing 577159f9aec0098b47a0f1bb1df4440f, disabling compactions & flushes 2024-11-23T17:29:07,199 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 2024-11-23T17:29:07,199 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 2024-11-23T17:29:07,199 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. after waiting 0 ms 2024-11-23T17:29:07,199 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 
2024-11-23T17:29:07,199 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing 577159f9aec0098b47a0f1bb1df4440f 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-23T17:29:07,265 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/.tmp/cf/43fee4af031b4bb2bf6a3e01af1dbfc8 is 28, key is 2/cf:/1732382946994/Put/seqid=0 2024-11-23T17:29:07,276 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/.tmp/cf/a428b4830f3c4ff9b888fc69ca88ede4 is 28, key is 1/cf:/1732382946980/Put/seqid=0 2024-11-23T17:29:07,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:29:07,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742163_1339 (size=4945) 2024-11-23T17:29:07,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742163_1339 (size=4945) 2024-11-23T17:29:07,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742163_1339 (size=4945) 2024-11-23T17:29:07,426 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/.tmp/cf/43fee4af031b4bb2bf6a3e01af1dbfc8 2024-11-23T17:29:07,435 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/.tmp/cf/43fee4af031b4bb2bf6a3e01af1dbfc8 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/cf/43fee4af031b4bb2bf6a3e01af1dbfc8 2024-11-23T17:29:07,443 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/cf/43fee4af031b4bb2bf6a3e01af1dbfc8, entries=1, sequenceid=5, filesize=4.8 K 2024-11-23T17:29:07,445 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for bb1819f22a90703b77321b72be17ace1 in 246ms, 
sequenceid=5, compaction requested=false 2024-11-23T17:29:07,445 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-23T17:29:07,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742164_1340 (size=4945) 2024-11-23T17:29:07,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742164_1340 (size=4945) 2024-11-23T17:29:07,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742164_1340 (size=4945) 2024-11-23T17:29:07,468 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/.tmp/cf/a428b4830f3c4ff9b888fc69ca88ede4 2024-11-23T17:29:07,477 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/.tmp/cf/a428b4830f3c4ff9b888fc69ca88ede4 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/cf/a428b4830f3c4ff9b888fc69ca88ede4 2024-11-23T17:29:07,489 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/cf/a428b4830f3c4ff9b888fc69ca88ede4, entries=1, sequenceid=5, filesize=4.8 K 2024-11-23T17:29:07,491 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 577159f9aec0098b47a0f1bb1df4440f in 291ms, sequenceid=5, compaction requested=false 2024-11-23T17:29:07,527 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0006_000001 (auth:SIMPLE) from 127.0.0.1:59464 2024-11-23T17:29:07,559 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:29:07,564 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:07,564 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed 
testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. 2024-11-23T17:29:07,564 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for bb1819f22a90703b77321b72be17ace1: Waiting for close lock at 1732382947198Running coprocessor pre-close hooks at 1732382947198Disabling compacts and flushes for region at 1732382947198Disabling writes for close at 1732382947199 (+1 ms)Obtaining lock to block concurrent updates at 1732382947199Preparing flush snapshotting stores in bb1819f22a90703b77321b72be17ace1 at 1732382947199Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732382947199Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1. at 1732382947201 (+2 ms)Flushing bb1819f22a90703b77321b72be17ace1/cf: creating writer at 1732382947201Flushing bb1819f22a90703b77321b72be17ace1/cf: appending metadata at 1732382947264 (+63 ms)Flushing bb1819f22a90703b77321b72be17ace1/cf: closing flushed file at 1732382947264Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1dfd4d06: reopening flushed file at 1732382947434 (+170 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for bb1819f22a90703b77321b72be17ace1 in 246ms, sequenceid=5, compaction requested=false at 1732382947445 (+11 ms)Writing region close event to WAL at 1732382947508 (+63 ms)Running coprocessor post-close hooks at 1732382947564 (+56 ms)Closed at 1732382947564 2024-11-23T17:29:07,573 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:07,573 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=bb1819f22a90703b77321b72be17ace1, regionState=CLOSED 2024-11-23T17:29:07,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure bb1819f22a90703b77321b72be17ace1, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:29:07,584 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-23T17:29:07,584 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure bb1819f22a90703b77321b72be17ace1, server=215bb12e999f,45467,1732382620632 in 538 msec 2024-11-23T17:29:07,587 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=bb1819f22a90703b77321b72be17ace1, UNASSIGN in 550 msec 2024-11-23T17:29:07,595 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:29:07,596 DEBUG 
[RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:07,596 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. 2024-11-23T17:29:07,596 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for 577159f9aec0098b47a0f1bb1df4440f: Waiting for close lock at 1732382947199Running coprocessor pre-close hooks at 1732382947199Disabling compacts and flushes for region at 1732382947199Disabling writes for close at 1732382947199Obtaining lock to block concurrent updates at 1732382947199Preparing flush snapshotting stores in 577159f9aec0098b47a0f1bb1df4440f at 1732382947199Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732382947200 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f. at 1732382947201 (+1 ms)Flushing 577159f9aec0098b47a0f1bb1df4440f/cf: creating writer at 1732382947201Flushing 577159f9aec0098b47a0f1bb1df4440f/cf: appending metadata at 1732382947275 (+74 ms)Flushing 577159f9aec0098b47a0f1bb1df4440f/cf: closing flushed file at 1732382947275Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b1b2e4c: reopening flushed file at 1732382947477 (+202 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 577159f9aec0098b47a0f1bb1df4440f in 291ms, sequenceid=5, compaction requested=false at 1732382947491 (+14 ms)Writing region close event to WAL at 1732382947564 (+73 ms)Running coprocessor post-close hooks at 1732382947596 (+32 ms)Closed at 1732382947596 2024-11-23T17:29:07,599 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed 577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:07,599 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=577159f9aec0098b47a0f1bb1df4440f, regionState=CLOSED 2024-11-23T17:29:07,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 577159f9aec0098b47a0f1bb1df4440f, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:29:07,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=149 2024-11-23T17:29:07,607 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 577159f9aec0098b47a0f1bb1df4440f, server=215bb12e999f,41065,1732382621000 in 557 msec 2024-11-23T17:29:07,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-11-23T17:29:07,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, 
region=577159f9aec0098b47a0f1bb1df4440f, UNASSIGN in 572 msec 2024-11-23T17:29:07,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:29:07,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742165_1341 (size=84) 2024-11-23T17:29:07,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742165_1341 (size=84) 2024-11-23T17:29:07,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742165_1341 (size=84) 2024-11-23T17:29:07,733 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:07,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742166_1342 (size=20) 2024-11-23T17:29:07,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742166_1342 (size=20) 2024-11-23T17:29:07,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742166_1342 (size=20) 2024-11-23T17:29:07,861 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:07,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742167_1343 (size=21) 2024-11-23T17:29:07,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742167_1343 (size=21) 2024-11-23T17:29:07,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742167_1343 (size=21) 2024-11-23T17:29:08,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742168_1344 (size=84) 2024-11-23T17:29:08,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742168_1344 (size=84) 2024-11-23T17:29:08,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742168_1344 (size=84) 2024-11-23T17:29:08,087 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:08,091 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:29:08,115 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-23T17:29:08,118 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945833.577159f9aec0098b47a0f1bb1df4440f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:08,119 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732382945833.bb1819f22a90703b77321b72be17ace1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:08,119 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:08,126 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=90b38de7d4454b3671fca0df1011d7b4, ASSIGN}] 2024-11-23T17:29:08,127 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=90b38de7d4454b3671fca0df1011d7b4, ASSIGN 2024-11-23T17:29:08,129 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=90b38de7d4454b3671fca0df1011d7b4, ASSIGN; state=MERGED, location=215bb12e999f,41065,1732382621000; forceNewPlan=false, retain=false 2024-11-23T17:29:08,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:29:08,279 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-23T17:29:08,279 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=90b38de7d4454b3671fca0df1011d7b4, regionState=OPENING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:29:08,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=90b38de7d4454b3671fca0df1011d7b4, ASSIGN because future has completed 2024-11-23T17:29:08,282 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90b38de7d4454b3671fca0df1011d7b4, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:29:08,445 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. 2024-11-23T17:29:08,446 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 90b38de7d4454b3671fca0df1011d7b4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4.', STARTKEY => '', ENDKEY => ''} 2024-11-23T17:29:08,446 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. service=AccessControlService 2024-11-23T17:29:08,446 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:29:08,447 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,447 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:08,447 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,447 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,468 INFO [StoreOpener-90b38de7d4454b3671fca0df1011d7b4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,472 INFO [StoreOpener-90b38de7d4454b3671fca0df1011d7b4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 90b38de7d4454b3671fca0df1011d7b4 columnFamilyName cf 2024-11-23T17:29:08,472 DEBUG [StoreOpener-90b38de7d4454b3671fca0df1011d7b4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:08,587 DEBUG [StoreOpener-90b38de7d4454b3671fca0df1011d7b4-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/43fee4af031b4bb2bf6a3e01af1dbfc8.bb1819f22a90703b77321b72be17ace1->hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/cf/43fee4af031b4bb2bf6a3e01af1dbfc8-top 2024-11-23T17:29:08,606 DEBUG [StoreOpener-90b38de7d4454b3671fca0df1011d7b4-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/a428b4830f3c4ff9b888fc69ca88ede4.577159f9aec0098b47a0f1bb1df4440f->hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/cf/a428b4830f3c4ff9b888fc69ca88ede4-top 2024-11-23T17:29:08,606 INFO [StoreOpener-90b38de7d4454b3671fca0df1011d7b4-1 {}] regionserver.HStore(327): Store=90b38de7d4454b3671fca0df1011d7b4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:08,607 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,608 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,609 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,609 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,609 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,611 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,612 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 90b38de7d4454b3671fca0df1011d7b4; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73760122, jitterRate=0.09911146759986877}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:08,613 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:08,613 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 90b38de7d4454b3671fca0df1011d7b4: Running coprocessor pre-open hook at 1732382948447Writing region info on filesystem at 1732382948447Initializing all the Stores at 1732382948448 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382948448Cleaning up temporary data from old regions at 1732382948609 (+161 ms)Running coprocessor post-open hooks at 1732382948613 (+4 ms)Region opened successfully at 1732382948613 2024-11-23T17:29:08,614 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4., pid=154, masterSystemTime=1732382948436 2024-11-23T17:29:08,614 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4.,because compaction is disabled. 2024-11-23T17:29:08,618 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. 2024-11-23T17:29:08,618 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. 2024-11-23T17:29:08,618 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=90b38de7d4454b3671fca0df1011d7b4, regionState=OPEN, openSeqNum=9, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:29:08,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 90b38de7d4454b3671fca0df1011d7b4, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:29:08,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-23T17:29:08,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 90b38de7d4454b3671fca0df1011d7b4, server=215bb12e999f,41065,1732382621000 in 340 msec 2024-11-23T17:29:08,628 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-11-23T17:29:08,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=90b38de7d4454b3671fca0df1011d7b4, ASSIGN in 498 msec 2024-11-23T17:29:08,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[577159f9aec0098b47a0f1bb1df4440f, bb1819f22a90703b77321b72be17ace1], force=true in 1.6120 sec 2024-11-23T17:29:08,885 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:29:09,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-23T17:29:09,176 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:29:09,177 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-23T17:29:09,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382949177 (current time:1732382949177). 2024-11-23T17:29:09,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:29:09,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-23T17:29:09,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:09,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c53f6fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:09,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:09,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:09,186 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:09,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:09,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:09,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73b89bf9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:09,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:09,187 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:09,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:09,189 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58504, 
version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:09,189 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57d0591e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:09,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:09,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:09,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:09,193 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59388, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:09,199 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:09,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:09,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:09,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:09,200 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:29:09,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a627b48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:09,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:09,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:09,212 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:09,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:09,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:09,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52e11ac9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:09,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:09,213 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:09,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:09,215 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58520, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:09,216 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2912844a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:09,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:09,218 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:09,219 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:09,220 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59392, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:29:09,222 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:09,223 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:09,225 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57202, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:09,226 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:09,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:09,227 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:09,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:09,227 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:09,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-23T17:29:09,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:29:09,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-23T17:29:09,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-23T17:29:09,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-23T17:29:09,231 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:09,233 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:09,247 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:09,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742169_1345 (size=216) 2024-11-23T17:29:09,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742169_1345 (size=216) 2024-11-23T17:29:09,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742169_1345 (size=216) 2024-11-23T17:29:09,324 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:09,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90b38de7d4454b3671fca0df1011d7b4}] 2024-11-23T17:29:09,325 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:09,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-23T17:29:09,479 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-11-23T17:29:09,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. 2024-11-23T17:29:09,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 90b38de7d4454b3671fca0df1011d7b4: 2024-11-23T17:29:09,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-23T17:29:09,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:09,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:09,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/43fee4af031b4bb2bf6a3e01af1dbfc8.bb1819f22a90703b77321b72be17ace1->hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/cf/43fee4af031b4bb2bf6a3e01af1dbfc8-top, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/a428b4830f3c4ff9b888fc69ca88ede4.577159f9aec0098b47a0f1bb1df4440f->hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/cf/a428b4830f3c4ff9b888fc69ca88ede4-top] hfiles 2024-11-23T17:29:09,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/43fee4af031b4bb2bf6a3e01af1dbfc8.bb1819f22a90703b77321b72be17ace1 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:09,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/a428b4830f3c4ff9b888fc69ca88ede4.577159f9aec0098b47a0f1bb1df4440f for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:09,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-23T17:29:09,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742170_1346 (size=269) 2024-11-23T17:29:09,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742170_1346 (size=269) 2024-11-23T17:29:09,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742170_1346 (size=269) 2024-11-23T17:29:09,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. 
2024-11-23T17:29:09,617 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-23T17:29:09,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-11-23T17:29:09,618 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:09,618 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:09,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-11-23T17:29:09,624 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 90b38de7d4454b3671fca0df1011d7b4 in 296 msec 2024-11-23T17:29:09,625 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:09,626 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:09,627 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:09,628 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:09,628 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:09,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742171_1347 (size=670) 2024-11-23T17:29:09,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742171_1347 (size=670) 2024-11-23T17:29:09,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742171_1347 (size=670) 2024-11-23T17:29:09,808 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:09,808 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-23T17:29:09,809 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:09,809 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:09,809 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-23T17:29:09,810 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-23T17:29:09,839 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:09,839 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:09,845 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:09,845 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-23T17:29:09,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 617 msec 2024-11-23T17:29:09,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-23T17:29:09,862 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:29:09,862 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862 2024-11-23T17:29:09,862 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37605, tgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862, rawTgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:09,919 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:09,919 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:09,922 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-11-23T17:29:09,932 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:10,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742172_1348 (size=216) 2024-11-23T17:29:10,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742172_1348 (size=216) 2024-11-23T17:29:10,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742172_1348 (size=216) 2024-11-23T17:29:10,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742173_1349 (size=670) 2024-11-23T17:29:10,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742173_1349 (size=670) 2024-11-23T17:29:10,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742173_1349 (size=670) 2024-11-23T17:29:10,062 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:10,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:10,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:11,842 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-8537733616602339140.jar 2024-11-23T17:29:11,843 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:11,843 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:11,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-3981170537336717157.jar 
2024-11-23T17:29:11,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:11,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:11,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:11,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:11,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:11,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:11,970 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:29:11,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:29:11,971 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:29:11,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:29:11,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:29:11,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:29:11,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:29:11,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:29:11,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:29:11,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:29:11,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:29:11,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:11,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:11,975 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:29:11,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:11,983 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:11,983 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:29:11,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:29:12,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742174_1350 (size=131440) 2024-11-23T17:29:12,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742174_1350 (size=131440) 2024-11-23T17:29:12,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742174_1350 (size=131440) 2024-11-23T17:29:12,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742175_1351 (size=4188619) 2024-11-23T17:29:12,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742175_1351 (size=4188619) 2024-11-23T17:29:12,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742175_1351 (size=4188619) 2024-11-23T17:29:12,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742176_1352 (size=1323991) 2024-11-23T17:29:12,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742176_1352 (size=1323991) 2024-11-23T17:29:12,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742176_1352 (size=1323991) 2024-11-23T17:29:12,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742177_1353 (size=903733) 2024-11-23T17:29:12,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742177_1353 (size=903733) 2024-11-23T17:29:12,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742177_1353 (size=903733) 2024-11-23T17:29:12,600 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0006/container_1732382632246_0006_01_000001/launch_container.sh] 2024-11-23T17:29:12,600 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0006/container_1732382632246_0006_01_000001/container_tokens] 2024-11-23T17:29:12,600 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0006/container_1732382632246_0006_01_000001/sysfs] 2024-11-23T17:29:12,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742178_1354 (size=8360083) 2024-11-23T17:29:12,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742178_1354 (size=8360083) 2024-11-23T17:29:12,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742178_1354 (size=8360083) 2024-11-23T17:29:12,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742179_1355 (size=6424742) 2024-11-23T17:29:12,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742179_1355 (size=6424742) 2024-11-23T17:29:12,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742179_1355 (size=6424742) 2024-11-23T17:29:13,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742180_1356 (size=1877034) 2024-11-23T17:29:13,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742180_1356 (size=1877034) 2024-11-23T17:29:13,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742180_1356 (size=1877034) 2024-11-23T17:29:13,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742181_1357 (size=77835) 2024-11-23T17:29:13,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742181_1357 (size=77835) 2024-11-23T17:29:13,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742181_1357 (size=77835) 2024-11-23T17:29:13,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742182_1358 (size=30949) 2024-11-23T17:29:13,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742182_1358 (size=30949) 2024-11-23T17:29:13,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742182_1358 (size=30949) 2024-11-23T17:29:13,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742183_1359 (size=1597347) 2024-11-23T17:29:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742183_1359 (size=1597347) 2024-11-23T17:29:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742183_1359 (size=1597347) 2024-11-23T17:29:13,263 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742184_1360 (size=4695811) 2024-11-23T17:29:13,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742184_1360 (size=4695811) 2024-11-23T17:29:13,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742184_1360 (size=4695811) 2024-11-23T17:29:13,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742185_1361 (size=232957) 2024-11-23T17:29:13,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742185_1361 (size=232957) 2024-11-23T17:29:13,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742185_1361 (size=232957) 2024-11-23T17:29:13,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742186_1362 (size=127628) 2024-11-23T17:29:13,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742186_1362 (size=127628) 2024-11-23T17:29:13,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742186_1362 (size=127628) 2024-11-23T17:29:13,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742187_1363 (size=20406) 2024-11-23T17:29:13,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742187_1363 (size=20406) 2024-11-23T17:29:13,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742187_1363 (size=20406) 2024-11-23T17:29:13,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742188_1364 (size=5175431) 2024-11-23T17:29:13,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742188_1364 (size=5175431) 2024-11-23T17:29:13,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742188_1364 (size=5175431) 2024-11-23T17:29:13,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742189_1365 (size=217634) 2024-11-23T17:29:13,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742189_1365 (size=217634) 2024-11-23T17:29:13,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742189_1365 (size=217634) 2024-11-23T17:29:13,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742190_1366 (size=1832290) 2024-11-23T17:29:13,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742190_1366 (size=1832290) 2024-11-23T17:29:13,322 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742190_1366 (size=1832290) 2024-11-23T17:29:13,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742191_1367 (size=322274) 2024-11-23T17:29:13,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742191_1367 (size=322274) 2024-11-23T17:29:13,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742191_1367 (size=322274) 2024-11-23T17:29:13,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742192_1368 (size=503880) 2024-11-23T17:29:13,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742192_1368 (size=503880) 2024-11-23T17:29:13,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742192_1368 (size=503880) 2024-11-23T17:29:13,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742193_1369 (size=440957) 2024-11-23T17:29:13,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742193_1369 (size=440957) 2024-11-23T17:29:13,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742193_1369 (size=440957) 2024-11-23T17:29:13,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742194_1370 (size=29229) 2024-11-23T17:29:13,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742194_1370 (size=29229) 2024-11-23T17:29:13,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742194_1370 (size=29229) 2024-11-23T17:29:13,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742195_1371 (size=24096) 2024-11-23T17:29:13,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742195_1371 (size=24096) 2024-11-23T17:29:13,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742195_1371 (size=24096) 2024-11-23T17:29:13,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742196_1372 (size=111872) 2024-11-23T17:29:13,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742196_1372 (size=111872) 2024-11-23T17:29:13,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742196_1372 (size=111872) 2024-11-23T17:29:13,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742197_1373 (size=45609) 2024-11-23T17:29:13,374 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742197_1373 (size=45609) 2024-11-23T17:29:13,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742197_1373 (size=45609) 2024-11-23T17:29:13,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742198_1374 (size=136454) 2024-11-23T17:29:13,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742198_1374 (size=136454) 2024-11-23T17:29:13,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742198_1374 (size=136454) 2024-11-23T17:29:13,383 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-23T17:29:13,386 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-23T17:29:13,388 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-11-23T17:29:13,388 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-11-23T17:29:13,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742199_1375 (size=481) 2024-11-23T17:29:13,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742199_1375 (size=481) 2024-11-23T17:29:13,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742199_1375 (size=481) 2024-11-23T17:29:13,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742200_1376 (size=21) 2024-11-23T17:29:13,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742200_1376 (size=21) 2024-11-23T17:29:13,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742200_1376 (size=21) 2024-11-23T17:29:13,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742201_1377 (size=304057) 2024-11-23T17:29:13,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742201_1377 (size=304057) 2024-11-23T17:29:13,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742201_1377 (size=304057) 2024-11-23T17:29:13,435 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:29:13,435 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:29:13,472 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0007_000001 (auth:SIMPLE) from 127.0.0.1:40004 2024-11-23T17:29:13,948 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:29:18,433 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0007_000001 (auth:SIMPLE) from 127.0.0.1:51550 2024-11-23T17:29:18,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742202_1378 (size=349755) 2024-11-23T17:29:18,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742202_1378 (size=349755) 2024-11-23T17:29:18,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742202_1378 (size=349755) 2024-11-23T17:29:20,651 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0007_000001 (auth:SIMPLE) from 127.0.0.1:43376 2024-11-23T17:29:20,651 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0007_000001 (auth:SIMPLE) from 127.0.0.1:40020 2024-11-23T17:29:21,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 41264850465b123187f03a78b2521b39 1/1 column families, dataSize=1000 B heapSize=2.41 KB 2024-11-23T17:29:21,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/.tmp/l/912564116aab404484258869f66326cb is 86, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:jenkins/1732382946367/Put/seqid=0 2024-11-23T17:29:21,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742203_1379 (size=5595) 2024-11-23T17:29:21,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742203_1379 (size=5595) 2024-11-23T17:29:21,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742203_1379 (size=5595) 2024-11-23T17:29:21,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1000 B at sequenceid=20 (bloomFilter=false), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/.tmp/l/912564116aab404484258869f66326cb 2024-11-23T17:29:21,291 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 912564116aab404484258869f66326cb 2024-11-23T17:29:21,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/.tmp/l/912564116aab404484258869f66326cb as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/l/912564116aab404484258869f66326cb 
2024-11-23T17:29:21,297 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 912564116aab404484258869f66326cb 2024-11-23T17:29:21,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/l/912564116aab404484258869f66326cb, entries=9, sequenceid=20, filesize=5.5 K 2024-11-23T17:29:21,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~1000 B/1000, heapSize ~2.39 KB/2448, currentSize=0 B/0 for 41264850465b123187f03a78b2521b39 in 195ms, sequenceid=20, compaction requested=false 2024-11-23T17:29:21,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 41264850465b123187f03a78b2521b39: 2024-11-23T17:29:25,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742204_1380 (size=4945) 2024-11-23T17:29:25,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742204_1380 (size=4945) 2024-11-23T17:29:25,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742204_1380 (size=4945) 2024-11-23T17:29:25,881 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_3/usercache/jenkins/appcache/application_1732382632246_0007/container_1732382632246_0007_01_000002/launch_container.sh] 2024-11-23T17:29:25,881 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_3/usercache/jenkins/appcache/application_1732382632246_0007/container_1732382632246_0007_01_000002/container_tokens] 2024-11-23T17:29:25,881 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_3/usercache/jenkins/appcache/application_1732382632246_0007/container_1732382632246_0007_01_000002/sysfs] 2024-11-23T17:29:25,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742206_1382 (size=4945) 2024-11-23T17:29:25,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742206_1382 (size=4945) 2024-11-23T17:29:25,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742206_1382 (size=4945) 2024-11-23T17:29:26,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742205_1381 (size=22246) 2024-11-23T17:29:26,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742205_1381 
(size=22246) 2024-11-23T17:29:26,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742205_1381 (size=22246) 2024-11-23T17:29:26,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742207_1383 (size=483) 2024-11-23T17:29:26,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742207_1383 (size=483) 2024-11-23T17:29:26,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742207_1383 (size=483) 2024-11-23T17:29:26,110 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0007/container_1732382632246_0007_01_000003/launch_container.sh] 2024-11-23T17:29:26,110 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0007/container_1732382632246_0007_01_000003/container_tokens] 2024-11-23T17:29:26,110 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0007/container_1732382632246_0007_01_000003/sysfs] 2024-11-23T17:29:26,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742208_1384 (size=22246) 2024-11-23T17:29:26,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742208_1384 (size=22246) 2024-11-23T17:29:26,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742208_1384 (size=22246) 2024-11-23T17:29:26,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742209_1385 (size=349755) 2024-11-23T17:29:26,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742209_1385 (size=349755) 2024-11-23T17:29:26,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742209_1385 (size=349755) 2024-11-23T17:29:26,225 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0007_000001 (auth:SIMPLE) from 127.0.0.1:59328 2024-11-23T17:29:26,232 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0007_000001 (auth:SIMPLE) from 127.0.0.1:60786 2024-11-23T17:29:27,682 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:29:27,683 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:29:27,698 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:27,698 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:29:27,699 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:29:27,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:27,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-23T17:29:27,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-23T17:29:27,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:27,699 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-23T17:29:27,700 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382949862/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-23T17:29:27,712 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:27,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:27,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-23T17:29:27,717 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382967717"}]},"ts":"1732382967717"} 
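(The entries above show ExportSnapshot finalizing the export of snaptb0-testExportFileSystemStateWithMergeRegion-1 and verifying the copied .snapshotinfo and data.manifest. For reference, a minimal sketch of driving the same export outside the test harness, assuming ExportSnapshot can be run as a Hadoop Tool; the class name ExportSnapshotSketch and the -copy-to destination are placeholders, not taken from this run.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Cluster configuration; in this test it points at the mini-cluster NameNode on localhost:37605.
    Configuration conf = HBaseConfiguration.create();

    // Roughly equivalent to the documented command line:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 \
    //     -copy-to hdfs://<destination>/export-root          (placeholder destination)
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://example-namenode:8020/export-root"  // placeholder path, not from this log
    });
    System.exit(rc);
  }
}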
2024-11-23T17:29:27,719 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-23T17:29:27,719 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-23T17:29:27,720 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-23T17:29:27,722 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=90b38de7d4454b3671fca0df1011d7b4, UNASSIGN}] 2024-11-23T17:29:27,723 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=90b38de7d4454b3671fca0df1011d7b4, UNASSIGN 2024-11-23T17:29:27,724 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=90b38de7d4454b3671fca0df1011d7b4, regionState=CLOSING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:29:27,726 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=90b38de7d4454b3671fca0df1011d7b4, UNASSIGN because future has completed 2024-11-23T17:29:27,726 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:27,726 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90b38de7d4454b3671fca0df1011d7b4, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:29:27,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-23T17:29:27,879 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:27,880 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:29:27,880 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 90b38de7d4454b3671fca0df1011d7b4, disabling compactions & flushes 2024-11-23T17:29:27,880 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. 
2024-11-23T17:29:27,880 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. 2024-11-23T17:29:27,880 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. after waiting 0 ms 2024-11-23T17:29:27,880 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. 2024-11-23T17:29:27,886 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-23T17:29:27,887 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:27,887 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4. 2024-11-23T17:29:27,887 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 90b38de7d4454b3671fca0df1011d7b4: Waiting for close lock at 1732382967880Running coprocessor pre-close hooks at 1732382967880Disabling compacts and flushes for region at 1732382967880Disabling writes for close at 1732382967880Writing region close event to WAL at 1732382967881 (+1 ms)Running coprocessor post-close hooks at 1732382967887 (+6 ms)Closed at 1732382967887 2024-11-23T17:29:27,890 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:27,890 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=90b38de7d4454b3671fca0df1011d7b4, regionState=CLOSED 2024-11-23T17:29:27,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 90b38de7d4454b3671fca0df1011d7b4, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:29:27,899 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-11-23T17:29:27,899 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 90b38de7d4454b3671fca0df1011d7b4, server=215bb12e999f,41065,1732382621000 in 171 msec 2024-11-23T17:29:27,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-11-23T17:29:27,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, 
state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=90b38de7d4454b3671fca0df1011d7b4, UNASSIGN in 177 msec 2024-11-23T17:29:27,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-11-23T17:29:27,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 183 msec 2024-11-23T17:29:27,907 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382967907"}]},"ts":"1732382967907"} 2024-11-23T17:29:27,909 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-23T17:29:27,909 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-23T17:29:27,912 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 198 msec 2024-11-23T17:29:28,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-23T17:29:28,036 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:29:28,037 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,038 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,039 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,042 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:28,043 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:28,043 DEBUG [HFileArchiver-18 {}] 
backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:28,044 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,045 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/recovered.edits] 2024-11-23T17:29:28,045 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/recovered.edits] 2024-11-23T17:29:28,045 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/recovered.edits] 2024-11-23T17:29:28,183 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/43fee4af031b4bb2bf6a3e01af1dbfc8.bb1819f22a90703b77321b72be17ace1 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/43fee4af031b4bb2bf6a3e01af1dbfc8.bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:28,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,183 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,184 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/cf/a428b4830f3c4ff9b888fc69ca88ede4 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/cf/a428b4830f3c4ff9b888fc69ca88ede4 2024-11-23T17:29:28,184 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T17:29:28,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T17:29:28,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T17:29:28,185 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-23T17:29:28,186 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/cf/43fee4af031b4bb2bf6a3e01af1dbfc8 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/cf/43fee4af031b4bb2bf6a3e01af1dbfc8 2024-11-23T17:29:28,187 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/a428b4830f3c4ff9b888fc69ca88ede4.577159f9aec0098b47a0f1bb1df4440f to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/cf/a428b4830f3c4ff9b888fc69ca88ede4.577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:28,190 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/recovered.edits/8.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f/recovered.edits/8.seqid 2024-11-23T17:29:28,193 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/577159f9aec0098b47a0f1bb1df4440f 2024-11-23T17:29:28,194 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:28,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:28,194 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,194 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:28,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:28,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-11-23T17:29:28,196 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:28,196 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:28,196 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/recovered.edits/12.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4/recovered.edits/12.seqid 2024-11-23T17:29:28,196 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:28,196 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:28,197 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/90b38de7d4454b3671fca0df1011d7b4 2024-11-23T17:29:28,197 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/recovered.edits/8.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1/recovered.edits/8.seqid 2024-11-23T17:29:28,198 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/bb1819f22a90703b77321b72be17ace1 2024-11-23T17:29:28,198 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-23T17:29:28,200 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,203 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-23T17:29:28,206 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-23T17:29:28,208 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,208 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 
2024-11-23T17:29:28,208 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382968208"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:28,211 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-23T17:29:28,211 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 90b38de7d4454b3671fca0df1011d7b4, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4.', STARTKEY => '', ENDKEY => ''}] 2024-11-23T17:29:28,211 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-11-23T17:29:28,211 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382968211"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:28,214 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-23T17:29:28,215 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,217 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 178 msec 2024-11-23T17:29:28,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-11-23T17:29:28,307 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,307 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-23T17:29:28,307 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-23T17:29:28,336 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382968336"}]},"ts":"1732382968336"} 2024-11-23T17:29:28,338 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-23T17:29:28,338 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 
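(The procedure entries above and below record the test teardown: each test table is disabled, then deleted, and the test snapshots are removed at the end of the run. A minimal sketch of the same cleanup through the public HBase Admin API, assuming a plain client connection; the table and snapshot names are copied from the surrounding log, while the class name TeardownSketch and connection details are placeholders.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TeardownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      // A table must be disabled before it can be deleted; this is the
      // DisableTableProcedure / DeleteTableProcedure sequence visible in the log.
      if (admin.tableExists(table)) {
        admin.disableTable(table);
        admin.deleteTable(table);
      }
      // Remove the snapshots taken by the test (names as they appear in the log further down).
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
    }
  }
}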
2024-11-23T17:29:28,340 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-23T17:29:28,342 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=13b59b0ffd9e01bdd2d43f453a6b2a1a, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=efb4f5fc98bc6102f21e07256724c719, UNASSIGN}] 2024-11-23T17:29:28,343 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=efb4f5fc98bc6102f21e07256724c719, UNASSIGN 2024-11-23T17:29:28,343 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=13b59b0ffd9e01bdd2d43f453a6b2a1a, UNASSIGN 2024-11-23T17:29:28,343 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=efb4f5fc98bc6102f21e07256724c719, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:28,343 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=13b59b0ffd9e01bdd2d43f453a6b2a1a, regionState=CLOSING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:28,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=13b59b0ffd9e01bdd2d43f453a6b2a1a, UNASSIGN because future has completed 2024-11-23T17:29:28,345 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:28,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:29:28,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=efb4f5fc98bc6102f21e07256724c719, UNASSIGN because future has completed 2024-11-23T17:29:28,346 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:28,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure efb4f5fc98bc6102f21e07256724c719, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:29:28,446 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-23T17:29:28,498 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:28,498 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:29:28,498 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing 13b59b0ffd9e01bdd2d43f453a6b2a1a, disabling compactions & flushes 2024-11-23T17:29:28,498 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:28,498 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:28,498 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. after waiting 0 ms 2024-11-23T17:29:28,499 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:28,499 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:28,499 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:29:28,499 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing efb4f5fc98bc6102f21e07256724c719, disabling compactions & flushes 2024-11-23T17:29:28,499 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 2024-11-23T17:29:28,499 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 2024-11-23T17:29:28,499 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 
after waiting 0 ms 2024-11-23T17:29:28,499 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 2024-11-23T17:29:28,507 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:29:28,507 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:28,507 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a. 2024-11-23T17:29:28,507 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for 13b59b0ffd9e01bdd2d43f453a6b2a1a: Waiting for close lock at 1732382968498Running coprocessor pre-close hooks at 1732382968498Disabling compacts and flushes for region at 1732382968498Disabling writes for close at 1732382968498Writing region close event to WAL at 1732382968500 (+2 ms)Running coprocessor post-close hooks at 1732382968507 (+7 ms)Closed at 1732382968507 2024-11-23T17:29:28,508 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:29:28,509 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:28,509 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719. 
2024-11-23T17:29:28,509 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for efb4f5fc98bc6102f21e07256724c719: Waiting for close lock at 1732382968499Running coprocessor pre-close hooks at 1732382968499Disabling compacts and flushes for region at 1732382968499Disabling writes for close at 1732382968499Writing region close event to WAL at 1732382968501 (+2 ms)Running coprocessor post-close hooks at 1732382968509 (+8 ms)Closed at 1732382968509 2024-11-23T17:29:28,510 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed 13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:28,510 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=13b59b0ffd9e01bdd2d43f453a6b2a1a, regionState=CLOSED 2024-11-23T17:29:28,511 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:28,512 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=efb4f5fc98bc6102f21e07256724c719, regionState=CLOSED 2024-11-23T17:29:28,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:29:28,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure efb4f5fc98bc6102f21e07256724c719, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:29:28,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=164 2024-11-23T17:29:28,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=164, state=SUCCESS, hasLock=false; CloseRegionProcedure 13b59b0ffd9e01bdd2d43f453a6b2a1a, server=215bb12e999f,45467,1732382620632 in 170 msec 2024-11-23T17:29:28,526 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=165 2024-11-23T17:29:28,526 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure efb4f5fc98bc6102f21e07256724c719, server=215bb12e999f,44667,1732382620918 in 175 msec 2024-11-23T17:29:28,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=13b59b0ffd9e01bdd2d43f453a6b2a1a, UNASSIGN in 181 msec 2024-11-23T17:29:28,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=165, resume processing ppid=163 2024-11-23T17:29:28,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=efb4f5fc98bc6102f21e07256724c719, UNASSIGN in 184 msec 2024-11-23T17:29:28,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-23T17:29:28,533 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 191 msec 2024-11-23T17:29:28,534 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382968534"}]},"ts":"1732382968534"} 2024-11-23T17:29:28,536 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-23T17:29:28,536 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-23T17:29:28,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 230 msec 2024-11-23T17:29:28,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-23T17:29:28,656 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:29:28,657 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,659 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,660 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,663 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,664 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:28,664 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719 2024-11-23T17:29:28,666 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/recovered.edits] 2024-11-23T17:29:28,666 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/recovered.edits] 2024-11-23T17:29:28,678 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/cf/4ac9821d005249adbbfabd0ed228e94d to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/cf/4ac9821d005249adbbfabd0ed228e94d 2024-11-23T17:29:28,681 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a/recovered.edits/9.seqid 2024-11-23T17:29:28,681 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/cf/cdd3ccc9b60a40179233fa9503d9c8d2 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/cf/cdd3ccc9b60a40179233fa9503d9c8d2 2024-11-23T17:29:28,682 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/13b59b0ffd9e01bdd2d43f453a6b2a1a 2024-11-23T17:29:28,685 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719/recovered.edits/9.seqid 2024-11-23T17:29:28,686 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithMergeRegion/efb4f5fc98bc6102f21e07256724c719 
2024-11-23T17:29:28,686 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-23T17:29:28,698 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,701 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-23T17:29:28,703 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-23T17:29:28,705 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,705 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-11-23T17:29:28,705 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382968705"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:28,705 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382968705"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:28,708 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:29:28,708 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 13b59b0ffd9e01bdd2d43f453a6b2a1a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732382943634.13b59b0ffd9e01bdd2d43f453a6b2a1a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => efb4f5fc98bc6102f21e07256724c719, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732382943634.efb4f5fc98bc6102f21e07256724c719.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:29:28,708 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
2024-11-23T17:29:28,708 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382968708"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:28,710 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-23T17:29:28,712 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 55 msec 2024-11-23T17:29:28,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,726 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,727 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T17:29:28,727 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T17:29:28,728 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T17:29:28,728 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-23T17:29:28,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,736 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:28,736 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:28,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:28,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:28,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-23T17:29:28,739 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,739 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-23T17:29:28,749 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-23T17:29:28,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,752 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-23T17:29:28,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:28,756 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-23T17:29:28,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:28,786 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=814 (was 802) Potentially 
hanging thread: process reaper (pid 120792) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1251325229_1 at /127.0.0.1:39072 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:59106 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:50724 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41331 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:41331 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:39108 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5761 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1251325229_1 at /127.0.0.1:59078 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=817 (was 799) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1052 (was 1231), ProcessCount=20 (was 20), AvailableMemoryMB=1730 (was 1944) 2024-11-23T17:29:28,786 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-11-23T17:29:28,809 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=814, OpenFileDescriptor=817, MaxFileDescriptor=1048576, SystemLoadAverage=1052, ProcessCount=20, AvailableMemoryMB=1729 2024-11-23T17:29:28,809 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-11-23T17:29:28,811 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:29:28,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:29:28,814 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:29:28,814 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:28,814 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 2024-11-23T17:29:28,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T17:29:28,815 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:29:28,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742210_1386 (size=407) 2024-11-23T17:29:28,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742210_1386 (size=407) 2024-11-23T17:29:28,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742210_1386 (size=407) 2024-11-23T17:29:28,841 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => faff3508be89550a12635573fe1c9dc5, NAME => 'testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:28,841 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 653be934b06b3ef7ec2913ea01c4f27f, NAME => 'testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:28,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742212_1388 (size=68) 2024-11-23T17:29:28,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742212_1388 (size=68) 2024-11-23T17:29:28,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742212_1388 (size=68) 2024-11-23T17:29:28,887 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:28,887 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing faff3508be89550a12635573fe1c9dc5, disabling compactions & flushes 2024-11-23T17:29:28,887 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:28,887 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:28,887 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. after waiting 0 ms 2024-11-23T17:29:28,887 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:28,887 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 
2024-11-23T17:29:28,887 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for faff3508be89550a12635573fe1c9dc5: Waiting for close lock at 1732382968887Disabling compacts and flushes for region at 1732382968887Disabling writes for close at 1732382968887Writing region close event to WAL at 1732382968887Closed at 1732382968887 2024-11-23T17:29:28,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742211_1387 (size=68) 2024-11-23T17:29:28,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742211_1387 (size=68) 2024-11-23T17:29:28,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742211_1387 (size=68) 2024-11-23T17:29:28,901 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:28,901 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 653be934b06b3ef7ec2913ea01c4f27f, disabling compactions & flushes 2024-11-23T17:29:28,901 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:28,901 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:28,901 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. after waiting 0 ms 2024-11-23T17:29:28,901 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:28,901 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 
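The CreateTableProcedure above lays out two regions split at row key '1' for a table with REGION_REPLICATION '1' and a single 'cf' family (VERSIONS '1', BLOCKSIZE '65536', no compression or encoding). A rough client-side equivalent of that descriptor, hedged: only the options the log actually prints are set here, and the Admin handle is assumed to come from an existing connection:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // assumes an open Admin handle, e.g. ConnectionFactory.createConnection(conf).getAdmin()
    void createExpiredSnapshotTable(Admin admin) throws java.io.IOException {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
          .setRegionReplication(1)                              // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                                // VERSIONS => '1'
              .setBlocksize(65536)                              // BLOCKSIZE => '65536'
              .build())
          .build();
      byte[][] splitKeys = { Bytes.toBytes("1") };              // yields the two regions (,1) and (1,)
      admin.createTable(desc, splitKeys);                       // submits the CreateTableProcedure
    }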
2024-11-23T17:29:28,901 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 653be934b06b3ef7ec2913ea01c4f27f: Waiting for close lock at 1732382968901Disabling compacts and flushes for region at 1732382968901Disabling writes for close at 1732382968901Writing region close event to WAL at 1732382968901Closed at 1732382968901 2024-11-23T17:29:28,902 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:29:28,903 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732382968903"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382968903"}]},"ts":"1732382968903"} 2024-11-23T17:29:28,903 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732382968903"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382968903"}]},"ts":"1732382968903"} 2024-11-23T17:29:28,907 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:29:28,908 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:29:28,909 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382968908"}]},"ts":"1732382968908"} 2024-11-23T17:29:28,910 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-23T17:29:28,911 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:29:28,912 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:29:28,912 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:29:28,912 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:29:28,912 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:29:28,912 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:29:28,912 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:29:28,912 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:29:28,912 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:29:28,912 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:29:28,912 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:29:28,913 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=faff3508be89550a12635573fe1c9dc5, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=653be934b06b3ef7ec2913ea01c4f27f, ASSIGN}] 2024-11-23T17:29:28,915 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=653be934b06b3ef7ec2913ea01c4f27f, ASSIGN 2024-11-23T17:29:28,915 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=faff3508be89550a12635573fe1c9dc5, ASSIGN 2024-11-23T17:29:28,916 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=653be934b06b3ef7ec2913ea01c4f27f, ASSIGN; state=OFFLINE, location=215bb12e999f,45467,1732382620632; forceNewPlan=false, retain=false 2024-11-23T17:29:28,916 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=faff3508be89550a12635573fe1c9dc5, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:29:28,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T17:29:29,066 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
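The balancer output above plans the two new regions onto the cluster, and the entries that follow show each region moving through OPENING to OPEN on its target server. Once those TransitRegionStateProcedures complete, a client can confirm the final placement through the RegionLocator API; a small hedged sketch, with the table name taken from the log and the connection assumed:

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // assumes an open Connection, e.g. from ConnectionFactory.createConnection(conf)
    void printAssignments(Connection conn) throws java.io.IOException {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      try (RegionLocator locator = conn.getRegionLocator(table)) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          // encoded region name and hosting RegionServer for each region of the table
          System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
        }
      }
    }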
2024-11-23T17:29:29,067 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=653be934b06b3ef7ec2913ea01c4f27f, regionState=OPENING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:29,067 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=faff3508be89550a12635573fe1c9dc5, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:29,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=653be934b06b3ef7ec2913ea01c4f27f, ASSIGN because future has completed 2024-11-23T17:29:29,069 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:29:29,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=faff3508be89550a12635573fe1c9dc5, ASSIGN because future has completed 2024-11-23T17:29:29,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure faff3508be89550a12635573fe1c9dc5, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:29:29,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T17:29:29,224 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:29,224 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:29,224 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => faff3508be89550a12635573fe1c9dc5, NAME => 'testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:29:29,224 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => 653be934b06b3ef7ec2913ea01c4f27f, NAME => 'testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. service=AccessControlService 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 
service=AccessControlService 2024-11-23T17:29:29,225 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:29:29,225 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,225 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,228 INFO [StoreOpener-653be934b06b3ef7ec2913ea01c4f27f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,228 INFO [StoreOpener-faff3508be89550a12635573fe1c9dc5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,229 INFO [StoreOpener-653be934b06b3ef7ec2913ea01c4f27f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 653be934b06b3ef7ec2913ea01c4f27f columnFamilyName cf 2024-11-23T17:29:29,229 INFO [StoreOpener-faff3508be89550a12635573fe1c9dc5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region faff3508be89550a12635573fe1c9dc5 columnFamilyName cf 2024-11-23T17:29:29,229 DEBUG [StoreOpener-653be934b06b3ef7ec2913ea01c4f27f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:29,229 DEBUG [StoreOpener-faff3508be89550a12635573fe1c9dc5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:29,229 INFO [StoreOpener-faff3508be89550a12635573fe1c9dc5-1 {}] regionserver.HStore(327): Store=faff3508be89550a12635573fe1c9dc5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:29,229 INFO [StoreOpener-653be934b06b3ef7ec2913ea01c4f27f-1 {}] regionserver.HStore(327): Store=653be934b06b3ef7ec2913ea01c4f27f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:29,230 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,230 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,230 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,230 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,231 DEBUG 
[RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,231 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,231 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,231 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,231 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,231 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,232 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,232 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,234 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:29,234 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:29,235 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened faff3508be89550a12635573fe1c9dc5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63920077, jitterRate=-0.047516629099845886}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:29,235 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,235 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened 653be934b06b3ef7ec2913ea01c4f27f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74582439, jitterRate=0.11136494576931}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:29,235 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,235 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for faff3508be89550a12635573fe1c9dc5: Running coprocessor pre-open hook at 1732382969226Writing region info on filesystem at 1732382969226Initializing all the Stores at 1732382969226Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382969226Cleaning up temporary data from old regions at 1732382969231 (+5 ms)Running coprocessor post-open hooks at 1732382969235 (+4 ms)Region opened successfully at 1732382969235 2024-11-23T17:29:29,235 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for 653be934b06b3ef7ec2913ea01c4f27f: Running coprocessor pre-open hook at 1732382969226Writing region info on filesystem at 1732382969226Initializing all the Stores at 1732382969226Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382969226Cleaning up temporary data from old regions at 1732382969231 (+5 ms)Running coprocessor post-open hooks at 1732382969235 (+4 ms)Region opened successfully at 1732382969235 2024-11-23T17:29:29,236 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f., pid=172, masterSystemTime=1732382969221 2024-11-23T17:29:29,236 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5., pid=173, masterSystemTime=1732382969221 2024-11-23T17:29:29,238 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:29,238 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 
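The CompactionConfiguration entries above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, major period 604800000) are the stock store-compaction settings. As a hedged illustration only, those knobs map to the standard HBase configuration keys below; the values simply mirror what this log prints (they are the defaults), so this is not a tuning recommendation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);             // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);            // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);      // compaction file-selection ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L); // major compaction period, 7 days in ms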
2024-11-23T17:29:29,239 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=653be934b06b3ef7ec2913ea01c4f27f, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:29,239 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:29,239 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:29,242 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=faff3508be89550a12635573fe1c9dc5, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:29,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:29:29,244 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure faff3508be89550a12635573fe1c9dc5, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:29:29,245 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-23T17:29:29,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f, server=215bb12e999f,45467,1732382620632 in 175 msec 2024-11-23T17:29:29,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-23T17:29:29,246 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure faff3508be89550a12635573fe1c9dc5, server=215bb12e999f,44667,1732382620918 in 174 msec 2024-11-23T17:29:29,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=653be934b06b3ef7ec2913ea01c4f27f, ASSIGN in 333 msec 2024-11-23T17:29:29,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-23T17:29:29,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=faff3508be89550a12635573fe1c9dc5, ASSIGN in 333 msec 2024-11-23T17:29:29,248 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:29:29,249 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382969248"}]},"ts":"1732382969248"} 
2024-11-23T17:29:29,250 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-23T17:29:29,251 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:29:29,251 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-23T17:29:29,255 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T17:29:29,299 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:29,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:29,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:29,299 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:29,309 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:29,310 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:29,310 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:29,311 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:29,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 499 msec 2024-11-23T17:29:29,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-11-23T17:29:29,446 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 
2024-11-23T17:29:29,446 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-23T17:29:29,447 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:29,451 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-23T17:29:29,452 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:29,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-11-23T17:29:29,452 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:29:29,455 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T17:29:29,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382969455 (current time:1732382969455). 2024-11-23T17:29:29,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:29:29,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-23T17:29:29,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:29,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@753dc1a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:29,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:29,457 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:29,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:29,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:29,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd70178, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:29,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:29,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,459 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37892, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:29,459 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23151a41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:29,460 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:29,461 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:29,462 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46304, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:29,463 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:29,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:29,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,464 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:29,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b9bba45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:29,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:29,466 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:29,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:29,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:29,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@622d274e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:29,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:29,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,468 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37908, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:29,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4546931f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:29,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:29,471 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:29,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46310, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:29,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:29,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:29,475 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41286, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:29,476 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:29,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:29,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T17:29:29,477 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:29,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:29:29,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T17:29:29,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-11-23T17:29:29,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T17:29:29,482 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:29,483 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:29,486 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:29,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742213_1389 (size=170) 2024-11-23T17:29:29,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742213_1389 (size=170) 2024-11-23T17:29:29,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742213_1389 (size=170) 2024-11-23T17:29:29,499 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:29,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faff3508be89550a12635573fe1c9dc5}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f}] 2024-11-23T17:29:29,501 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,501 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,586 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T17:29:29,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-11-23T17:29:29,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-11-23T17:29:29,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:29,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for 653be934b06b3ef7ec2913ea01c4f27f: 2024-11-23T17:29:29,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-23T17:29:29,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:29:29,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:29,653 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:29:29,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:29,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for faff3508be89550a12635573fe1c9dc5: 2024-11-23T17:29:29,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-23T17:29:29,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:29:29,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:29,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:29:29,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742214_1390 (size=71) 2024-11-23T17:29:29,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742214_1390 (size=71) 2024-11-23T17:29:29,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742214_1390 (size=71) 2024-11-23T17:29:29,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:29,703 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-23T17:29:29,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-11-23T17:29:29,704 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,704 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,707 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f in 206 msec 2024-11-23T17:29:29,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742215_1391 (size=71) 2024-11-23T17:29:29,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742215_1391 (size=71) 2024-11-23T17:29:29,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742215_1391 (size=71) 2024-11-23T17:29:29,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 
2024-11-23T17:29:29,721 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-11-23T17:29:29,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-11-23T17:29:29,721 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,721 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-11-23T17:29:29,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure faff3508be89550a12635573fe1c9dc5 in 225 msec 2024-11-23T17:29:29,729 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:29,730 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:29,731 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:29,731 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:29:29,732 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:29:29,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742216_1392 (size=552) 2024-11-23T17:29:29,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742216_1392 (size=552) 2024-11-23T17:29:29,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742216_1392 (size=552) 2024-11-23T17:29:29,758 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:29,762 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:29,763 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:29:29,764 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:29,764 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-11-23T17:29:29,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 286 msec 2024-11-23T17:29:29,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-23T17:29:29,796 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T17:29:29,800 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='0f52da1dbfca0bd9cfa3c01a4073547ce', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:29,801 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='1a7d3ee1f17eeb44bbe6f3434a24d4528', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:29,802 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='3edd5c926dd3c517662c7a935dffc2592', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:29,803 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='2b16b010015f6597f2fc8c1e4b0ce1aeb', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f., 
hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:29,804 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='4274020926882ba284ddcae3760dfcb4f', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:29,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-23T17:29:29,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-23T17:29:29,806 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:29,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-23T17:29:29,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-23T17:29:29,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45467 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:29,810 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:29:29,812 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-23T17:29:29,812 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 
2024-11-23T17:29:29,812 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:29,814 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:29:29,819 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:29:29,826 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:29:29,828 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T17:29:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382969828 (current time:1732382969828). 2024-11-23T17:29:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:29:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-23T17:29:29,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:29,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f0916b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:29,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:29,830 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:29,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:29,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:29,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a6797f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-23T17:29:29,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:29,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:29,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,832 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37920, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:29,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35e877c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:29,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:29,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:29,835 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46316, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:29,836 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:29,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:29,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,837 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:29,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b27157e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:29,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:29,841 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:29,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:29,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:29,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40ccb3f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:29,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:29,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,843 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37924, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:29,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b73499c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:29,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:29,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:29,845 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:29,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46326, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:29,847 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:29,848 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:29,848 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41300, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:29,850 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T17:29:29,851 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:29,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-23T17:29:29,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-23T17:29:29,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-23T17:29:29,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T17:29:29,853 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:29,855 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:29,862 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:29,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742217_1393 (size=165) 2024-11-23T17:29:29,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742217_1393 (size=165) 2024-11-23T17:29:29,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742217_1393 (size=165) 2024-11-23T17:29:29,886 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:29,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faff3508be89550a12635573fe1c9dc5}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f}] 2024-11-23T17:29:29,890 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:29,890 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:29,956 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T17:29:30,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-11-23T17:29:30,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-11-23T17:29:30,047 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:30,048 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing 653be934b06b3ef7ec2913ea01c4f27f 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-23T17:29:30,048 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:30,048 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing faff3508be89550a12635573fe1c9dc5 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-23T17:29:30,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/.tmp/cf/d19cd4f207bb4d88ba456a111f85791d is 71, key is 0e89359905e2f84b7eea9c6ffc626c81/cf:q/1732382969806/Put/seqid=0 2024-11-23T17:29:30,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/.tmp/cf/f98950490c484432855b5524300e91d7 is 71, key is 18cd04163daf53cb530babfd66d9158e/cf:q/1732382969808/Put/seqid=0 2024-11-23T17:29:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742218_1394 (size=5216) 2024-11-23T17:29:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742218_1394 (size=5216) 2024-11-23T17:29:30,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742218_1394 (size=5216) 2024-11-23T17:29:30,112 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/.tmp/cf/d19cd4f207bb4d88ba456a111f85791d 2024-11-23T17:29:30,125 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/.tmp/cf/d19cd4f207bb4d88ba456a111f85791d as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/cf/d19cd4f207bb4d88ba456a111f85791d 2024-11-23T17:29:30,134 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/cf/d19cd4f207bb4d88ba456a111f85791d, entries=2, sequenceid=6, filesize=5.1 K 2024-11-23T17:29:30,135 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for faff3508be89550a12635573fe1c9dc5 in 87ms, sequenceid=6, compaction requested=false 2024-11-23T17:29:30,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-23T17:29:30,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for faff3508be89550a12635573fe1c9dc5: 2024-11-23T17:29:30,136 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. for snaptb0-testExportExpiredSnapshot completed. 2024-11-23T17:29:30,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T17:29:30,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:30,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/cf/d19cd4f207bb4d88ba456a111f85791d] hfiles 2024-11-23T17:29:30,137 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/cf/d19cd4f207bb4d88ba456a111f85791d for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T17:29:30,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742219_1395 (size=8392) 2024-11-23T17:29:30,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742219_1395 (size=8392) 2024-11-23T17:29:30,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742219_1395 (size=8392) 2024-11-23T17:29:30,152 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/.tmp/cf/f98950490c484432855b5524300e91d7 2024-11-23T17:29:30,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T17:29:30,168 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/.tmp/cf/f98950490c484432855b5524300e91d7 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/cf/f98950490c484432855b5524300e91d7 2024-11-23T17:29:30,177 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/cf/f98950490c484432855b5524300e91d7, entries=48, sequenceid=6, filesize=8.2 K 2024-11-23T17:29:30,178 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 653be934b06b3ef7ec2913ea01c4f27f in 130ms, sequenceid=6, compaction requested=false 
2024-11-23T17:29:30,178 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for 653be934b06b3ef7ec2913ea01c4f27f: 2024-11-23T17:29:30,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. for snaptb0-testExportExpiredSnapshot completed. 2024-11-23T17:29:30,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T17:29:30,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:30,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/cf/f98950490c484432855b5524300e91d7] hfiles 2024-11-23T17:29:30,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/cf/f98950490c484432855b5524300e91d7 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-23T17:29:30,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742220_1396 (size=110) 2024-11-23T17:29:30,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742220_1396 (size=110) 2024-11-23T17:29:30,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742220_1396 (size=110) 2024-11-23T17:29:30,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 
2024-11-23T17:29:30,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-23T17:29:30,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-11-23T17:29:30,238 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:30,240 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:30,252 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure faff3508be89550a12635573fe1c9dc5 in 359 msec 2024-11-23T17:29:30,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742221_1397 (size=110) 2024-11-23T17:29:30,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742221_1397 (size=110) 2024-11-23T17:29:30,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742221_1397 (size=110) 2024-11-23T17:29:30,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 
2024-11-23T17:29:30,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-11-23T17:29:30,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-11-23T17:29:30,260 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:30,260 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:30,264 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-11-23T17:29:30,264 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f in 375 msec 2024-11-23T17:29:30,264 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:30,266 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:30,268 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:30,268 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-23T17:29:30,269 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-23T17:29:30,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742222_1398 (size=630) 2024-11-23T17:29:30,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742222_1398 (size=630) 2024-11-23T17:29:30,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742222_1398 (size=630) 2024-11-23T17:29:30,347 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:30,382 INFO 
[PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:30,383 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-23T17:29:30,384 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:30,385 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-23T17:29:30,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 534 msec 2024-11-23T17:29:30,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-23T17:29:30,477 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T17:29:30,481 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:29:30,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-23T17:29:30,483 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:29:30,483 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:30,485 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-11-23T17:29:30,486 INFO [PEWorker-3 
{}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:29:30,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T17:29:30,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742223_1399 (size=400) 2024-11-23T17:29:30,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742223_1399 (size=400) 2024-11-23T17:29:30,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742223_1399 (size=400) 2024-11-23T17:29:30,513 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6f88863827b64acc564823a1044a6b2b, NAME => 'testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:30,522 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5e511dfc454574bea858d074620b30f9, NAME => 'testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:30,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742224_1400 (size=61) 2024-11-23T17:29:30,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742224_1400 (size=61) 2024-11-23T17:29:30,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742224_1400 (size=61) 2024-11-23T17:29:30,540 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:30,540 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] 
regionserver.HRegion(1722): Closing 6f88863827b64acc564823a1044a6b2b, disabling compactions & flushes 2024-11-23T17:29:30,540 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:29:30,540 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:29:30,540 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. after waiting 0 ms 2024-11-23T17:29:30,540 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:29:30,540 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:29:30,540 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6f88863827b64acc564823a1044a6b2b: Waiting for close lock at 1732382970540Disabling compacts and flushes for region at 1732382970540Disabling writes for close at 1732382970540Writing region close event to WAL at 1732382970540Closed at 1732382970540 2024-11-23T17:29:30,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742225_1401 (size=61) 2024-11-23T17:29:30,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742225_1401 (size=61) 2024-11-23T17:29:30,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742225_1401 (size=61) 2024-11-23T17:29:30,551 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:30,551 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 5e511dfc454574bea858d074620b30f9, disabling compactions & flushes 2024-11-23T17:29:30,551 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:29:30,551 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:29:30,551 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. after waiting 0 ms 2024-11-23T17:29:30,551 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 
2024-11-23T17:29:30,551 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:29:30,551 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5e511dfc454574bea858d074620b30f9: Waiting for close lock at 1732382970551Disabling compacts and flushes for region at 1732382970551Disabling writes for close at 1732382970551Writing region close event to WAL at 1732382970551Closed at 1732382970551 2024-11-23T17:29:30,552 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:29:30,553 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732382970552"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382970552"}]},"ts":"1732382970552"} 2024-11-23T17:29:30,553 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732382970552"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382970552"}]},"ts":"1732382970552"} 2024-11-23T17:29:30,555 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:29:30,556 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:29:30,557 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382970556"}]},"ts":"1732382970556"} 2024-11-23T17:29:30,559 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-23T17:29:30,559 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:29:30,561 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:29:30,561 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:29:30,561 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:29:30,561 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:29:30,561 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:29:30,561 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:29:30,561 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:29:30,561 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:29:30,561 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:29:30,561 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:29:30,561 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6f88863827b64acc564823a1044a6b2b, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5e511dfc454574bea858d074620b30f9, ASSIGN}] 2024-11-23T17:29:30,562 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5e511dfc454574bea858d074620b30f9, ASSIGN 2024-11-23T17:29:30,562 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6f88863827b64acc564823a1044a6b2b, ASSIGN 2024-11-23T17:29:30,563 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5e511dfc454574bea858d074620b30f9, ASSIGN; state=OFFLINE, location=215bb12e999f,45467,1732382620632; forceNewPlan=false, retain=false 2024-11-23T17:29:30,563 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6f88863827b64acc564823a1044a6b2b, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:29:30,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T17:29:30,714 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:29:30,714 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=5e511dfc454574bea858d074620b30f9, regionState=OPENING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:30,715 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=6f88863827b64acc564823a1044a6b2b, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:30,716 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5e511dfc454574bea858d074620b30f9, ASSIGN because future has completed 2024-11-23T17:29:30,717 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e511dfc454574bea858d074620b30f9, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:29:30,718 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6f88863827b64acc564823a1044a6b2b, ASSIGN because future has completed 2024-11-23T17:29:30,718 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6f88863827b64acc564823a1044a6b2b, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:29:30,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T17:29:30,876 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:29:30,876 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => 5e511dfc454574bea858d074620b30f9, NAME => 'testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:29:30,876 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:29:30,876 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => 6f88863827b64acc564823a1044a6b2b, NAME => 'testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:29:30,876 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. service=AccessControlService 2024-11-23T17:29:30,876 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:29:30,876 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. service=AccessControlService 2024-11-23T17:29:30,877 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,877 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:29:30,877 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,877 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:30,877 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:30,877 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,877 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,877 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,877 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,892 INFO [StoreOpener-6f88863827b64acc564823a1044a6b2b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,892 INFO [StoreOpener-5e511dfc454574bea858d074620b30f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,893 INFO [StoreOpener-6f88863827b64acc564823a1044a6b2b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6f88863827b64acc564823a1044a6b2b columnFamilyName cf 2024-11-23T17:29:30,893 DEBUG [StoreOpener-6f88863827b64acc564823a1044a6b2b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:30,894 INFO [StoreOpener-5e511dfc454574bea858d074620b30f9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5e511dfc454574bea858d074620b30f9 columnFamilyName cf 2024-11-23T17:29:30,894 INFO [StoreOpener-6f88863827b64acc564823a1044a6b2b-1 {}] regionserver.HStore(327): Store=6f88863827b64acc564823a1044a6b2b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:30,894 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,894 DEBUG [StoreOpener-5e511dfc454574bea858d074620b30f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:30,895 INFO [StoreOpener-5e511dfc454574bea858d074620b30f9-1 {}] regionserver.HStore(327): Store=5e511dfc454574bea858d074620b30f9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:30,895 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,895 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,895 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,895 DEBUG 
[RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,896 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,896 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,896 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,896 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,896 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,897 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,897 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,900 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:30,901 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened 6f88863827b64acc564823a1044a6b2b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63470397, jitterRate=-0.0542173832654953}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:30,901 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:30,902 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for 6f88863827b64acc564823a1044a6b2b: Running coprocessor pre-open hook at 1732382970877Writing region info on filesystem at 1732382970877Initializing all the Stores at 1732382970881 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B 
(64KB)'} at 1732382970881Cleaning up temporary data from old regions at 1732382970896 (+15 ms)Running coprocessor post-open hooks at 1732382970901 (+5 ms)Region opened successfully at 1732382970902 (+1 ms) 2024-11-23T17:29:30,903 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b., pid=184, masterSystemTime=1732382970873 2024-11-23T17:29:30,908 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:29:30,908 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:29:30,909 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=6f88863827b64acc564823a1044a6b2b, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:30,910 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:30,911 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened 5e511dfc454574bea858d074620b30f9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66742081, jitterRate=-0.005465492606163025}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:30,912 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:30,912 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for 5e511dfc454574bea858d074620b30f9: Running coprocessor pre-open hook at 1732382970877Writing region info on filesystem at 1732382970877Initializing all the Stores at 1732382970881 (+4 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382970881Cleaning up temporary data from old regions at 1732382970896 (+15 ms)Running coprocessor post-open hooks at 1732382970912 (+16 ms)Region opened successfully at 1732382970912 2024-11-23T17:29:30,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6f88863827b64acc564823a1044a6b2b, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:29:30,914 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] 
regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9., pid=183, masterSystemTime=1732382970873 2024-11-23T17:29:30,917 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:29:30,917 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:29:30,918 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=5e511dfc454574bea858d074620b30f9, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:30,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=181 2024-11-23T17:29:30,920 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure 6f88863827b64acc564823a1044a6b2b, server=215bb12e999f,44667,1732382620918 in 196 msec 2024-11-23T17:29:30,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5e511dfc454574bea858d074620b30f9, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:29:30,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=6f88863827b64acc564823a1044a6b2b, ASSIGN in 359 msec 2024-11-23T17:29:30,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=182 2024-11-23T17:29:30,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 5e511dfc454574bea858d074620b30f9, server=215bb12e999f,45467,1732382620632 in 207 msec 2024-11-23T17:29:30,928 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-11-23T17:29:30,928 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=5e511dfc454574bea858d074620b30f9, ASSIGN in 365 msec 2024-11-23T17:29:30,929 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:29:30,929 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382970929"}]},"ts":"1732382970929"} 2024-11-23T17:29:30,931 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-23T17:29:30,932 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:29:30,933 
DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-23T17:29:30,936 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T17:29:30,993 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:30,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:30,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:30,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:31,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:31,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:31,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:31,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:31,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:31,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:31,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:31,070 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:31,071 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 588 msec 2024-11-23T17:29:31,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-23T17:29:31,115 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-23T17:29:31,116 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-23T17:29:31,116 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:31,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-23T17:29:31,119 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:31,119 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-11-23T17:29:31,119 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:29:31,124 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='06c9b880c513cbfac81b39583159fe13a', locateType=CURRENT is [region=testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:31,124 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='153c20fc437f79c3164883e2ff9dc5e54', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:31,125 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='24d8e93305ba2f5d210ba239423ee6e84', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:31,126 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='39fc7ca6f2644bc8e2516f2488a42130f', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:31,127 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='4c24dfc48afdc2a78bef379023a85f029', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:31,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] 
regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:31,131 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45467 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:31,132 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:29:31,135 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-23T17:29:31,135 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:29:31,135 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:31,137 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:29:31,141 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-23T17:29:31,146 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-23T17:29:31,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-23T17:29:31,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:31,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16b55f7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:31,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:31,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:31,147 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:31,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:31,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 
2024-11-23T17:29:31,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ce047c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:31,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:31,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:31,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:31,148 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:31,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f56813c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:31,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:31,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:31,150 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:31,151 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46328, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:31,151 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:31,152 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13489b22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:31,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:31,153 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:31,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:31,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:31,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@139d1a9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:31,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:31,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:31,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:31,154 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37964, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:31,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33bdfe17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:31,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:31,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:31,156 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:31,156 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46342, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:31,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:31,158 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:31,159 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41304, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:31,160 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 
2024-11-23T17:29:31,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:31,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:31,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:31,160 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:31,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-23T17:29:31,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
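(Annotation, not part of the captured log output:) The master RPC recorded above shows a client snapshot request of type FLUSH with ttl=10 (seconds) for table testExportExpiredSnapshot, which the SnapshotManager then turns into SnapshotProcedure pid=185 below. A minimal, hypothetical Java sketch of issuing such a TTL-bounded snapshot through the public client Admin API follows; the SnapshotDescription constructor and the "TTL" snapshot property key used here are assumptions inferred from the ttl field in this request, not verified against this exact 4.0.0-alpha-1-SNAPSHOT build.

    import java.util.Collections;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TtlSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Sketch only: request a FLUSH snapshot carrying a 10-second TTL, mirroring the
        // "ss=snapshot-testExportExpiredSnapshot ... type=FLUSH ttl=10" request logged above.
        // The "TTL" property key (value in seconds) is an assumption, not confirmed for this build.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          SnapshotDescription desc = new SnapshotDescription(
              "snapshot-testExportExpiredSnapshot",
              TableName.valueOf("testExportExpiredSnapshot"),
              SnapshotType.FLUSH,
              Collections.<String, Object>singletonMap("TTL", 10L));
          admin.snapshot(desc);
        }
      }
    }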
2024-11-23T17:29:31,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-23T17:29:31,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-11-23T17:29:31,162 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:31,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-23T17:29:31,163 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:31,165 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:31,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742226_1402 (size=152) 2024-11-23T17:29:31,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742226_1402 (size=152) 2024-11-23T17:29:31,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742226_1402 (size=152) 2024-11-23T17:29:31,174 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:31,174 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6f88863827b64acc564823a1044a6b2b}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5e511dfc454574bea858d074620b30f9}] 2024-11-23T17:29:31,175 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:31,175 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:31,266 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-23T17:29:31,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-11-23T17:29:31,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-11-23T17:29:31,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:29:31,327 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:29:31,327 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing 6f88863827b64acc564823a1044a6b2b 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-23T17:29:31,327 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 5e511dfc454574bea858d074620b30f9 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-23T17:29:31,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b/.tmp/cf/7595e5e4ca014406a29ecd9b6ef0c070 is 71, key is 01764ccf7a9507d019628ef2747d1af3/cf:q/1732382971130/Put/seqid=0 2024-11-23T17:29:31,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9/.tmp/cf/af62d8721a524145babd07533cf7f7f4 is 71, key is 16de7d21bc6655c89646b7fe930e8027/cf:q/1732382971131/Put/seqid=0 2024-11-23T17:29:31,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742227_1403 (size=5422) 2024-11-23T17:29:31,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742227_1403 (size=5422) 2024-11-23T17:29:31,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742227_1403 (size=5422) 2024-11-23T17:29:31,353 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b/.tmp/cf/7595e5e4ca014406a29ecd9b6ef0c070 2024-11-23T17:29:31,357 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b/.tmp/cf/7595e5e4ca014406a29ecd9b6ef0c070 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b/cf/7595e5e4ca014406a29ecd9b6ef0c070 2024-11-23T17:29:31,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742228_1404 (size=8190) 2024-11-23T17:29:31,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742228_1404 (size=8190) 2024-11-23T17:29:31,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742228_1404 (size=8190) 2024-11-23T17:29:31,360 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9/.tmp/cf/af62d8721a524145babd07533cf7f7f4 2024-11-23T17:29:31,362 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b/cf/7595e5e4ca014406a29ecd9b6ef0c070, entries=5, sequenceid=5, filesize=5.3 K 2024-11-23T17:29:31,363 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 6f88863827b64acc564823a1044a6b2b in 36ms, sequenceid=5, compaction requested=false 2024-11-23T17:29:31,363 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-23T17:29:31,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for 6f88863827b64acc564823a1044a6b2b: 2024-11-23T17:29:31,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. for snapshot-testExportExpiredSnapshot completed. 2024-11-23T17:29:31,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T17:29:31,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:31,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b/cf/7595e5e4ca014406a29ecd9b6ef0c070] hfiles 2024-11-23T17:29:31,364 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b/cf/7595e5e4ca014406a29ecd9b6ef0c070 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T17:29:31,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9/.tmp/cf/af62d8721a524145babd07533cf7f7f4 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9/cf/af62d8721a524145babd07533cf7f7f4 2024-11-23T17:29:31,369 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9/cf/af62d8721a524145babd07533cf7f7f4, entries=45, sequenceid=5, filesize=8.0 K 2024-11-23T17:29:31,370 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 5e511dfc454574bea858d074620b30f9 in 43ms, sequenceid=5, compaction requested=false 2024-11-23T17:29:31,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 5e511dfc454574bea858d074620b30f9: 2024-11-23T17:29:31,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. for snapshot-testExportExpiredSnapshot completed. 2024-11-23T17:29:31,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T17:29:31,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:31,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9/cf/af62d8721a524145babd07533cf7f7f4] hfiles 2024-11-23T17:29:31,370 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9/cf/af62d8721a524145babd07533cf7f7f4 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-23T17:29:31,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742229_1405 (size=103) 2024-11-23T17:29:31,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742229_1405 (size=103) 2024-11-23T17:29:31,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742229_1405 (size=103) 2024-11-23T17:29:31,374 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 
2024-11-23T17:29:31,374 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-11-23T17:29:31,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-11-23T17:29:31,375 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:31,375 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:29:31,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742230_1406 (size=103) 2024-11-23T17:29:31,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742230_1406 (size=103) 2024-11-23T17:29:31,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742230_1406 (size=103) 2024-11-23T17:29:31,377 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6f88863827b64acc564823a1044a6b2b in 202 msec 2024-11-23T17:29:31,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 
2024-11-23T17:29:31,377 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-11-23T17:29:31,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-11-23T17:29:31,378 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:31,378 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5e511dfc454574bea858d074620b30f9 2024-11-23T17:29:31,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=185 2024-11-23T17:29:31,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5e511dfc454574bea858d074620b30f9 in 204 msec 2024-11-23T17:29:31,380 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:31,381 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:31,381 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:31,381 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-23T17:29:31,382 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-23T17:29:31,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742231_1407 (size=609) 2024-11-23T17:29:31,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742231_1407 (size=609) 2024-11-23T17:29:31,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742231_1407 (size=609) 2024-11-23T17:29:31,391 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:31,395 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:31,395 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-23T17:29:31,396 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:31,396 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-11-23T17:29:31,397 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 235 msec 2024-11-23T17:29:31,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-11-23T17:29:31,476 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-23T17:29:32,315 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0007_000001 (auth:SIMPLE) from 127.0.0.1:55846 2024-11-23T17:29:32,325 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0007/container_1732382632246_0007_01_000001/launch_container.sh] 2024-11-23T17:29:32,325 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0007/container_1732382632246_0007_01_000001/container_tokens] 2024-11-23T17:29:32,325 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0007/container_1732382632246_0007_01_000001/sysfs] 2024-11-23T17:29:33,429 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:29:38,091 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:29:39,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-23T17:29:39,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-23T17:29:41,483 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382981483 2024-11-23T17:29:41,483 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37605, tgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382981483, rawTgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382981483, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:41,525 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:41,525 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382981483, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382981483/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-23T17:29:41,528 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:29:41,529 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
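(Annotation, not part of the captured log output:) The export attempted at 17:29:41 fails with SnapshotTTLExpiredException because the snapshot registered at 17:29:31 carried ttl=10 seconds. A minimal sketch of the expiry arithmetic implied by this verification step follows; the helper name is illustrative (the stack trace above shows the production check running inside ExportSnapshot.verifySnapshot), and the epoch timestamps are taken from the log lines above.

    final class SnapshotTtlExpirySketch {
      // Illustrative rule: a TTL-bounded snapshot is treated as expired once
      // creationTime + ttl (in seconds) lies in the past; a non-positive TTL never expires.
      static boolean isExpired(long ttlSeconds, long creationTimeMillis, long nowMillis) {
        if (ttlSeconds <= 0) {
          return false;
        }
        return creationTimeMillis + ttlSeconds * 1000L < nowMillis;
      }

      public static void main(String[] args) {
        long created = 1732382971161L; // ~17:29:31.161, snapshot procedure pid=185 registered above
        long now     = 1732382981529L; // ~17:29:41.529, ExportSnapshot verification above
        // Prints "true": more than 10 seconds have elapsed, so the export is refused.
        System.out.println(isExpired(10, created, now));
      }
    }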
2024-11-23T17:29:41,531 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-23T17:29:41,535 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382981535"}]},"ts":"1732382981535"} 2024-11-23T17:29:41,538 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-23T17:29:41,538 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-23T17:29:41,539 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-23T17:29:41,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=faff3508be89550a12635573fe1c9dc5, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=653be934b06b3ef7ec2913ea01c4f27f, UNASSIGN}] 2024-11-23T17:29:41,542 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=653be934b06b3ef7ec2913ea01c4f27f, UNASSIGN 2024-11-23T17:29:41,542 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=faff3508be89550a12635573fe1c9dc5, UNASSIGN 2024-11-23T17:29:41,543 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=653be934b06b3ef7ec2913ea01c4f27f, regionState=CLOSING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:41,543 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=faff3508be89550a12635573fe1c9dc5, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:41,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=faff3508be89550a12635573fe1c9dc5, UNASSIGN because future has completed 2024-11-23T17:29:41,545 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:41,545 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure faff3508be89550a12635573fe1c9dc5, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:29:41,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=653be934b06b3ef7ec2913ea01c4f27f, UNASSIGN because future has completed 2024-11-23T17:29:41,546 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:41,546 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:29:41,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-23T17:29:41,698 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:41,698 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:29:41,698 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing faff3508be89550a12635573fe1c9dc5, disabling compactions & flushes 2024-11-23T17:29:41,698 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:41,698 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 2024-11-23T17:29:41,698 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. after waiting 0 ms 2024-11-23T17:29:41,698 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 
2024-11-23T17:29:41,700 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:41,700 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:29:41,700 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing 653be934b06b3ef7ec2913ea01c4f27f, disabling compactions & flushes 2024-11-23T17:29:41,700 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:41,700 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:41,701 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. after waiting 0 ms 2024-11-23T17:29:41,701 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:41,703 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:29:41,704 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:41,704 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5. 
2024-11-23T17:29:41,704 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for faff3508be89550a12635573fe1c9dc5: Waiting for close lock at 1732382981698Running coprocessor pre-close hooks at 1732382981698Disabling compacts and flushes for region at 1732382981698Disabling writes for close at 1732382981698Writing region close event to WAL at 1732382981699 (+1 ms)Running coprocessor post-close hooks at 1732382981704 (+5 ms)Closed at 1732382981704 2024-11-23T17:29:41,705 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:29:41,706 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:41,706 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:41,706 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f. 2024-11-23T17:29:41,707 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for 653be934b06b3ef7ec2913ea01c4f27f: Waiting for close lock at 1732382981700Running coprocessor pre-close hooks at 1732382981700Disabling compacts and flushes for region at 1732382981700Disabling writes for close at 1732382981701 (+1 ms)Writing region close event to WAL at 1732382981701Running coprocessor post-close hooks at 1732382981706 (+5 ms)Closed at 1732382981706 2024-11-23T17:29:41,708 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=faff3508be89550a12635573fe1c9dc5, regionState=CLOSED 2024-11-23T17:29:41,709 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed 653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:41,709 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=653be934b06b3ef7ec2913ea01c4f27f, regionState=CLOSED 2024-11-23T17:29:41,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure faff3508be89550a12635573fe1c9dc5, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:29:41,712 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:29:41,713 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=190 2024-11-23T17:29:41,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=190, state=SUCCESS, hasLock=false; 
CloseRegionProcedure faff3508be89550a12635573fe1c9dc5, server=215bb12e999f,44667,1732382620918 in 166 msec 2024-11-23T17:29:41,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=191 2024-11-23T17:29:41,715 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=faff3508be89550a12635573fe1c9dc5, UNASSIGN in 173 msec 2024-11-23T17:29:41,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure 653be934b06b3ef7ec2913ea01c4f27f, server=215bb12e999f,45467,1732382620632 in 167 msec 2024-11-23T17:29:41,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-11-23T17:29:41,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=653be934b06b3ef7ec2913ea01c4f27f, UNASSIGN in 174 msec 2024-11-23T17:29:41,719 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-11-23T17:29:41,719 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 178 msec 2024-11-23T17:29:41,720 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382981720"}]},"ts":"1732382981720"} 2024-11-23T17:29:41,722 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-23T17:29:41,722 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-23T17:29:41,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 191 msec 2024-11-23T17:29:41,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-23T17:29:41,856 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T17:29:41,857 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,859 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,860 DEBUG 
[PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,862 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,863 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:41,863 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:41,864 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/recovered.edits] 2024-11-23T17:29:41,864 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/recovered.edits] 2024-11-23T17:29:41,912 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/cf/f98950490c484432855b5524300e91d7 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/cf/f98950490c484432855b5524300e91d7 2024-11-23T17:29:41,913 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/cf/d19cd4f207bb4d88ba456a111f85791d to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/cf/d19cd4f207bb4d88ba456a111f85791d 2024-11-23T17:29:41,915 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f/recovered.edits/9.seqid 2024-11-23T17:29:41,915 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/653be934b06b3ef7ec2913ea01c4f27f 2024-11-23T17:29:41,915 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5/recovered.edits/9.seqid 2024-11-23T17:29:41,916 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportExpiredSnapshot/faff3508be89550a12635573fe1c9dc5 2024-11-23T17:29:41,916 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-23T17:29:41,918 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,920 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-23T17:29:41,922 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-23T17:29:41,923 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,923 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-11-23T17:29:41,923 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382981923"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:41,923 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382981923"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:41,925 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:29:41,925 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => faff3508be89550a12635573fe1c9dc5, NAME => 'testtb-testExportExpiredSnapshot,,1732382968811.faff3508be89550a12635573fe1c9dc5.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 653be934b06b3ef7ec2913ea01c4f27f, NAME => 'testtb-testExportExpiredSnapshot,1,1732382968811.653be934b06b3ef7ec2913ea01c4f27f.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:29:41,925 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
2024-11-23T17:29:41,926 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382981925"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:41,927 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-23T17:29:41,928 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 71 msec 2024-11-23T17:29:41,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,977 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,978 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T17:29:41,978 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T17:29:41,979 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-23T17:29:41,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,988 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:41,988 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:41,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:41,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:41,988 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-11-23T17:29:41,988 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-23T17:29:41,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-11-23T17:29:41,989 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-23T17:29:41,989 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-23T17:29:41,990 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:41,990 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:41,990 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:41,991 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:41,999 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-23T17:29:42,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-23T17:29:42,002 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-23T17:29:42,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): 
Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-23T17:29:42,017 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-23T17:29:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-23T17:29:42,040 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=806 (was 814), OpenFileDescriptor=795 (was 817), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=986 (was 1052), ProcessCount=17 (was 20), AvailableMemoryMB=1995 (was 1729) - AvailableMemoryMB LEAK? - 2024-11-23T17:29:42,040 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-11-23T17:29:42,057 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=806, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=986, ProcessCount=17, AvailableMemoryMB=1994 2024-11-23T17:29:42,057 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=806 is superior to 500 2024-11-23T17:29:42,058 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:29:42,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:29:42,060 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:29:42,061 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:42,061 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-11-23T17:29:42,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T17:29:42,062 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:29:42,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742232_1408 (size=412) 2024-11-23T17:29:42,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742232_1408 (size=412) 2024-11-23T17:29:42,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742232_1408 (size=412) 2024-11-23T17:29:42,069 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b16ac99853753198964894fc0ca70344, NAME => 'testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:42,069 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 75de5a87a12fe52ded93c5475af83b5e, NAME => 'testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:42,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742234_1410 (size=73) 2024-11-23T17:29:42,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742234_1410 (size=73) 2024-11-23T17:29:42,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742234_1410 (size=73) 2024-11-23T17:29:42,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742233_1409 (size=73) 2024-11-23T17:29:42,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742233_1409 (size=73) 2024-11-23T17:29:42,085 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:42,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742233_1409 (size=73) 2024-11-23T17:29:42,085 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 
b16ac99853753198964894fc0ca70344, disabling compactions & flushes 2024-11-23T17:29:42,086 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. after waiting 0 ms 2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:42,086 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for b16ac99853753198964894fc0ca70344: Waiting for close lock at 1732382982085Disabling compacts and flushes for region at 1732382982085Disabling writes for close at 1732382982086 (+1 ms)Writing region close event to WAL at 1732382982086Closed at 1732382982086 2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 75de5a87a12fe52ded93c5475af83b5e, disabling compactions & flushes 2024-11-23T17:29:42,086 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. after waiting 0 ms 2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:42,086 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 
2024-11-23T17:29:42,086 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 75de5a87a12fe52ded93c5475af83b5e: Waiting for close lock at 1732382982086Disabling compacts and flushes for region at 1732382982086Disabling writes for close at 1732382982086Writing region close event to WAL at 1732382982086Closed at 1732382982086 2024-11-23T17:29:42,087 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:29:42,087 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732382982087"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382982087"}]},"ts":"1732382982087"} 2024-11-23T17:29:42,087 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732382982087"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382982087"}]},"ts":"1732382982087"} 2024-11-23T17:29:42,089 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:29:42,090 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:29:42,090 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382982090"}]},"ts":"1732382982090"} 2024-11-23T17:29:42,091 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-23T17:29:42,091 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:29:42,093 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:29:42,093 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:29:42,093 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:29:42,093 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:29:42,093 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:29:42,093 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:29:42,093 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:29:42,093 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:29:42,093 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:29:42,093 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:29:42,093 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b16ac99853753198964894fc0ca70344, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75de5a87a12fe52ded93c5475af83b5e, ASSIGN}] 2024-11-23T17:29:42,094 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75de5a87a12fe52ded93c5475af83b5e, ASSIGN 2024-11-23T17:29:42,094 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b16ac99853753198964894fc0ca70344, ASSIGN 2024-11-23T17:29:42,095 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b16ac99853753198964894fc0ca70344, ASSIGN; state=OFFLINE, location=215bb12e999f,41065,1732382621000; forceNewPlan=false, retain=false 2024-11-23T17:29:42,095 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75de5a87a12fe52ded93c5475af83b5e, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:29:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T17:29:42,245 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:29:42,246 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=b16ac99853753198964894fc0ca70344, regionState=OPENING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:29:42,246 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=75de5a87a12fe52ded93c5475af83b5e, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:42,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75de5a87a12fe52ded93c5475af83b5e, ASSIGN because future has completed 2024-11-23T17:29:42,247 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 75de5a87a12fe52ded93c5475af83b5e, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:29:42,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b16ac99853753198964894fc0ca70344, ASSIGN because future has completed 2024-11-23T17:29:42,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure b16ac99853753198964894fc0ca70344, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:29:42,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T17:29:42,402 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:42,402 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 75de5a87a12fe52ded93c5475af83b5e, NAME => 'testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:29:42,402 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. service=AccessControlService 2024-11-23T17:29:42,402 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:29:42,402 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 
2024-11-23T17:29:42,402 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,402 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => b16ac99853753198964894fc0ca70344, NAME => 'testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:29:42,402 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:42,402 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,402 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,403 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. service=AccessControlService 2024-11-23T17:29:42,403 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:29:42,403 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,403 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:42,403 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,403 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,404 INFO [StoreOpener-75de5a87a12fe52ded93c5475af83b5e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,405 INFO [StoreOpener-75de5a87a12fe52ded93c5475af83b5e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 75de5a87a12fe52ded93c5475af83b5e columnFamilyName cf 2024-11-23T17:29:42,405 DEBUG [StoreOpener-75de5a87a12fe52ded93c5475af83b5e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:42,406 INFO [StoreOpener-75de5a87a12fe52ded93c5475af83b5e-1 {}] regionserver.HStore(327): Store=75de5a87a12fe52ded93c5475af83b5e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:42,406 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,406 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,407 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,407 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,407 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,408 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,410 INFO [StoreOpener-b16ac99853753198964894fc0ca70344-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,410 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:42,411 INFO [StoreOpener-b16ac99853753198964894fc0ca70344-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b16ac99853753198964894fc0ca70344 columnFamilyName cf 2024-11-23T17:29:42,411 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 75de5a87a12fe52ded93c5475af83b5e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58929665, jitterRate=-0.12187956273555756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:42,411 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,411 DEBUG [StoreOpener-b16ac99853753198964894fc0ca70344-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:42,412 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 75de5a87a12fe52ded93c5475af83b5e: Running coprocessor pre-open hook at 1732382982403Writing region info on filesystem at 1732382982403Initializing all the Stores at 1732382982403Instantiating 
store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382982403Cleaning up temporary data from old regions at 1732382982407 (+4 ms)Running coprocessor post-open hooks at 1732382982411 (+4 ms)Region opened successfully at 1732382982412 (+1 ms) 2024-11-23T17:29:42,412 INFO [StoreOpener-b16ac99853753198964894fc0ca70344-1 {}] regionserver.HStore(327): Store=b16ac99853753198964894fc0ca70344/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:42,412 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e., pid=198, masterSystemTime=1732382982399 2024-11-23T17:29:42,412 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,413 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,414 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,414 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,414 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,415 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:42,415 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 
2024-11-23T17:29:42,415 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=75de5a87a12fe52ded93c5475af83b5e, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:42,416 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,417 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 75de5a87a12fe52ded93c5475af83b5e, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:29:42,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=197 2024-11-23T17:29:42,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 75de5a87a12fe52ded93c5475af83b5e, server=215bb12e999f,44667,1732382620918 in 171 msec 2024-11-23T17:29:42,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75de5a87a12fe52ded93c5475af83b5e, ASSIGN in 327 msec 2024-11-23T17:29:42,425 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:42,425 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened b16ac99853753198964894fc0ca70344; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73922617, jitterRate=0.10153283178806305}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:42,425 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,425 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for b16ac99853753198964894fc0ca70344: Running coprocessor pre-open hook at 1732382982403Writing region info on filesystem at 1732382982403Initializing all the Stores at 1732382982404 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382982404Cleaning up temporary data from old regions at 1732382982414 (+10 ms)Running coprocessor post-open hooks at 1732382982425 (+11 ms)Region opened successfully at 1732382982425 2024-11-23T17:29:42,426 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344., pid=199, masterSystemTime=1732382982399 2024-11-23T17:29:42,427 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:42,427 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:42,427 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=b16ac99853753198964894fc0ca70344, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:29:42,429 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure b16ac99853753198964894fc0ca70344, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:29:42,431 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=196 2024-11-23T17:29:42,431 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure b16ac99853753198964894fc0ca70344, server=215bb12e999f,41065,1732382621000 in 182 msec 2024-11-23T17:29:42,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-11-23T17:29:42,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b16ac99853753198964894fc0ca70344, ASSIGN in 338 msec 2024-11-23T17:29:42,434 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:29:42,434 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382982434"}]},"ts":"1732382982434"} 2024-11-23T17:29:42,435 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-23T17:29:42,436 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:29:42,436 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-23T17:29:42,444 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:29:42,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 
2024-11-23T17:29:42,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:42,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:42,514 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:42,525 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:42,525 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:42,525 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:42,525 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:42,526 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:42,526 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:42,526 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:42,526 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:42,527 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 467 msec 2024-11-23T17:29:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-11-23T17:29:42,686 INFO [RPCClient-NioEventLoopGroup-6-4 
{}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:29:42,686 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-11-23T17:29:42,686 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:42,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32778 bytes) of info 2024-11-23T17:29:42,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-11-23T17:29:42,693 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:42,693 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-11-23T17:29:42,693 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:29:42,695 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:29:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382982695 (current time:1732382982695). 
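[editor's note] The "Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState ... type=FLUSH ttl=0 }" entry above is the master-side view of an Admin snapshot call. A minimal sketch of the client side is shown below, assuming a reachable cluster configuration; only the table and snapshot names come from the log.

    // Minimal sketch of the client call behind the "snapshot request for:{ ... type=FLUSH }" line.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot; the master then drives the SnapshotProcedure states
          // (SNAPSHOT_PREPARE through SNAPSHOT_COMPLETE_SNAPSHOT) seen later in this log.
          admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
              TableName.valueOf("testtb-testEmptyExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }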
2024-11-23T17:29:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:29:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-23T17:29:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e511864, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:42,697 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:42,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:42,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:42,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c34f63d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:42,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:42,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:42,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:42,698 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:42,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b5d4870, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:42,700 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:42,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:42,701 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49858, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:42,702 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:42,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:42,702 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:29:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dcc1bca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:42,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:42,704 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:42,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:42,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:42,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cbcd5e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:42,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:42,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:42,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:42,705 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52168, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:42,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@19985223, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:42,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:42,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:42,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:42,708 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:29:42,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:42,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:42,710 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35786, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:42,711 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:42,711 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:42,711 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:42,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:29:42,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:29:42,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:29:42,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-11-23T17:29:42,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-23T17:29:42,714 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:42,715 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:42,718 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:42,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742235_1411 (size=185) 2024-11-23T17:29:42,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742235_1411 (size=185) 2024-11-23T17:29:42,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742235_1411 (size=185) 2024-11-23T17:29:42,736 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:42,736 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b16ac99853753198964894fc0ca70344}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75de5a87a12fe52ded93c5475af83b5e}] 2024-11-23T17:29:42,737 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,737 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-23T17:29:42,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-23T17:29:42,889 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-11-23T17:29:42,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:42,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 75de5a87a12fe52ded93c5475af83b5e: 2024-11-23T17:29:42,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-23T17:29:42,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:42,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:42,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 
2024-11-23T17:29:42,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:29:42,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for b16ac99853753198964894fc0ca70344: 2024-11-23T17:29:42,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-23T17:29:42,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:42,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:42,890 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:29:42,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742236_1412 (size=76) 2024-11-23T17:29:42,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742236_1412 (size=76) 2024-11-23T17:29:42,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742236_1412 (size=76) 2024-11-23T17:29:42,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 
2024-11-23T17:29:42,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-11-23T17:29:42,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-11-23T17:29:42,896 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,896 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b16ac99853753198964894fc0ca70344 2024-11-23T17:29:42,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b16ac99853753198964894fc0ca70344 in 161 msec 2024-11-23T17:29:42,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742237_1413 (size=76) 2024-11-23T17:29:42,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742237_1413 (size=76) 2024-11-23T17:29:42,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742237_1413 (size=76) 2024-11-23T17:29:42,919 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 
2024-11-23T17:29:42,919 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-23T17:29:42,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-23T17:29:42,919 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,920 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:42,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=200 2024-11-23T17:29:42,925 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:42,925 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 75de5a87a12fe52ded93c5475af83b5e in 186 msec 2024-11-23T17:29:42,925 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:42,926 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:42,926 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:42,927 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:42,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742238_1414 (size=567) 2024-11-23T17:29:42,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742238_1414 (size=567) 2024-11-23T17:29:42,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742238_1414 (size=567) 2024-11-23T17:29:42,940 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:42,950 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:42,950 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:42,952 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:42,952 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-11-23T17:29:42,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 240 msec 2024-11-23T17:29:43,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-11-23T17:29:43,026 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:29:43,030 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='0d7a1b52d87e8663193b89ac75f10d1b8', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:43,031 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='19956c71eb39d4f1970f2ba038f6d918c', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:43,033 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='2e7a857b329004c528dc9148fcd096cc4', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:43,034 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='3714c79b74e88fb69bb77b4446a7cbcff', 
locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:43,035 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='45725d7169a95217b7fd427c134874da3', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:43,037 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41065 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:43,040 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:43,044 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:29:43,046 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-23T17:29:43,046 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:43,046 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:43,048 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:29:43,052 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:29:43,057 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-23T17:29:43,060 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:29:43,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382983060 (current time:1732382983060). 
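[editor's note] The HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are emitted when the test loads rows using mutations that skip the write-ahead log. A hedged sketch of such a write follows; the row and value are placeholders, while the column family "cf" and qualifier "q" match the flush output further below.

    // Hedged sketch of a Put that skips the WAL, which is what triggers the warning above.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalWriteSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL trades durability for speed; the region server logs the risk.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }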
2024-11-23T17:29:43,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:29:43,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-23T17:29:43,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:43,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@335d046f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:43,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:43,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:43,061 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:43,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:43,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:43,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b060322, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:43,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:43,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:43,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:43,062 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52186, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:43,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c6cc5e5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:43,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:43,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:43,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:43,065 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49868, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:43,066 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:43,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:43,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:43,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:43,066 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:29:43,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fc937b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:43,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:43,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:43,067 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:43,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:43,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:43,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4e4114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:43,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:43,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:43,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:43,069 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52200, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:43,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75114407, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:43,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:43,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:43,071 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:43,072 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49880, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:29:43,073 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:43,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:43,075 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35788, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:43,076 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:43,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:43,076 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:43,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:43,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-23T17:29:43,076 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:43,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:29:43,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-23T17:29:43,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-23T17:29:43,079 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:43,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-23T17:29:43,079 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:43,081 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:43,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742239_1415 (size=180) 2024-11-23T17:29:43,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742239_1415 (size=180) 2024-11-23T17:29:43,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742239_1415 (size=180) 2024-11-23T17:29:43,096 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:43,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b16ac99853753198964894fc0ca70344}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75de5a87a12fe52ded93c5475af83b5e}] 2024-11-23T17:29:43,097 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b16ac99853753198964894fc0ca70344 2024-11-23T17:29:43,097 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:43,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-23T17:29:43,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-11-23T17:29:43,249 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-11-23T17:29:43,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:43,249 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 
2024-11-23T17:29:43,250 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing b16ac99853753198964894fc0ca70344 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-23T17:29:43,250 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 75de5a87a12fe52ded93c5475af83b5e 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-23T17:29:43,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/.tmp/cf/c4a39472a2a346aeb9bfcd500fb98284 is 71, key is 10015fedd143f8947ade7ef648c2365f/cf:q/1732382983040/Put/seqid=0 2024-11-23T17:29:43,269 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/.tmp/cf/fab8201ff62e4d9a9a203cca2084fa0e is 71, key is 066fe99bcae847fd101dd4a5b42e993a/cf:q/1732382983037/Put/seqid=0 2024-11-23T17:29:43,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742241_1417 (size=5216) 2024-11-23T17:29:43,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742241_1417 (size=5216) 2024-11-23T17:29:43,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742241_1417 (size=5216) 2024-11-23T17:29:43,286 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/.tmp/cf/fab8201ff62e4d9a9a203cca2084fa0e 2024-11-23T17:29:43,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/.tmp/cf/fab8201ff62e4d9a9a203cca2084fa0e as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/cf/fab8201ff62e4d9a9a203cca2084fa0e 2024-11-23T17:29:43,298 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/cf/fab8201ff62e4d9a9a203cca2084fa0e, entries=2, sequenceid=6, filesize=5.1 K 2024-11-23T17:29:43,299 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] 
regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for b16ac99853753198964894fc0ca70344 in 50ms, sequenceid=6, compaction requested=false 2024-11-23T17:29:43,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-23T17:29:43,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for b16ac99853753198964894fc0ca70344: 2024-11-23T17:29:43,299 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-23T17:29:43,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:43,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:43,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/cf/fab8201ff62e4d9a9a203cca2084fa0e] hfiles 2024-11-23T17:29:43,300 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/cf/fab8201ff62e4d9a9a203cca2084fa0e for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:43,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742240_1416 (size=8392) 2024-11-23T17:29:43,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742240_1416 (size=8392) 2024-11-23T17:29:43,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742240_1416 (size=8392) 2024-11-23T17:29:43,306 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/.tmp/cf/c4a39472a2a346aeb9bfcd500fb98284 2024-11-23T17:29:43,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/.tmp/cf/c4a39472a2a346aeb9bfcd500fb98284 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/cf/c4a39472a2a346aeb9bfcd500fb98284 2024-11-23T17:29:43,315 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/cf/c4a39472a2a346aeb9bfcd500fb98284, entries=48, sequenceid=6, filesize=8.2 K 2024-11-23T17:29:43,316 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 75de5a87a12fe52ded93c5475af83b5e in 67ms, sequenceid=6, compaction requested=false 2024-11-23T17:29:43,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 75de5a87a12fe52ded93c5475af83b5e: 2024-11-23T17:29:43,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-23T17:29:43,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:43,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:43,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/cf/c4a39472a2a346aeb9bfcd500fb98284] hfiles 2024-11-23T17:29:43,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/cf/c4a39472a2a346aeb9bfcd500fb98284 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:43,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742242_1418 (size=115) 2024-11-23T17:29:43,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742242_1418 (size=115) 2024-11-23T17:29:43,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742242_1418 (size=115) 2024-11-23T17:29:43,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 
2024-11-23T17:29:43,334 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-11-23T17:29:43,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-11-23T17:29:43,334 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region b16ac99853753198964894fc0ca70344 2024-11-23T17:29:43,334 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b16ac99853753198964894fc0ca70344 2024-11-23T17:29:43,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b16ac99853753198964894fc0ca70344 in 239 msec 2024-11-23T17:29:43,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742243_1419 (size=115) 2024-11-23T17:29:43,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742243_1419 (size=115) 2024-11-23T17:29:43,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742243_1419 (size=115) 2024-11-23T17:29:43,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 
2024-11-23T17:29:43,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-11-23T17:29:43,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-11-23T17:29:43,349 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:43,349 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:43,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-11-23T17:29:43,352 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:43,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 75de5a87a12fe52ded93c5475af83b5e in 254 msec 2024-11-23T17:29:43,353 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:43,354 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:43,354 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:43,354 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:43,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742244_1420 (size=645) 2024-11-23T17:29:43,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742244_1420 (size=645) 2024-11-23T17:29:43,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742244_1420 (size=645) 2024-11-23T17:29:43,374 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:43,378 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:43,379 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:43,380 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:43,380 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-23T17:29:43,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 303 msec 2024-11-23T17:29:43,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-23T17:29:43,396 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:29:43,396 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396 2024-11-23T17:29:43,396 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37605, tgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396, rawTgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:43,423 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:43,423 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:43,425 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:29:43,428 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:43,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742245_1421 (size=185) 2024-11-23T17:29:43,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742245_1421 (size=185) 2024-11-23T17:29:43,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742246_1422 (size=567) 2024-11-23T17:29:43,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742245_1421 (size=185) 2024-11-23T17:29:43,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742246_1422 (size=567) 2024-11-23T17:29:43,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742246_1422 (size=567) 2024-11-23T17:29:43,441 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:43,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:43,442 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:44,480 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-13059980747502997003.jar 2024-11-23T17:29:44,480 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:44,480 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:44,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-831639887599876468.jar 2024-11-23T17:29:44,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:44,552 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:44,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:44,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:44,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:44,553 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:44,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:29:44,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:29:44,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:29:44,554 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:29:44,555 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:29:44,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:29:44,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:29:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:29:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:29:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:29:44,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:29:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:29:44,557 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:44,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:44,558 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:29:44,558 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:29:44,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742247_1423 (size=131440) 2024-11-23T17:29:44,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742247_1423 (size=131440) 2024-11-23T17:29:44,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742247_1423 (size=131440) 2024-11-23T17:29:44,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742248_1424 (size=4188619) 2024-11-23T17:29:44,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742248_1424 (size=4188619) 2024-11-23T17:29:44,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742248_1424 (size=4188619) 2024-11-23T17:29:44,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742249_1425 (size=1323991) 2024-11-23T17:29:44,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742249_1425 (size=1323991) 2024-11-23T17:29:44,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742249_1425 (size=1323991) 2024-11-23T17:29:44,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742250_1426 (size=6424742) 2024-11-23T17:29:44,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742250_1426 (size=6424742) 2024-11-23T17:29:44,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742250_1426 (size=6424742) 2024-11-23T17:29:44,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742251_1427 (size=903733) 2024-11-23T17:29:44,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742251_1427 (size=903733) 2024-11-23T17:29:44,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742251_1427 (size=903733) 2024-11-23T17:29:44,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742252_1428 (size=8360083) 2024-11-23T17:29:44,722 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742252_1428 (size=8360083) 2024-11-23T17:29:44,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742252_1428 (size=8360083) 2024-11-23T17:29:44,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742253_1429 (size=1877034) 2024-11-23T17:29:44,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742253_1429 (size=1877034) 2024-11-23T17:29:44,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742253_1429 (size=1877034) 2024-11-23T17:29:44,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742254_1430 (size=77835) 2024-11-23T17:29:44,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742254_1430 (size=77835) 2024-11-23T17:29:44,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742254_1430 (size=77835) 2024-11-23T17:29:44,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742255_1431 (size=30949) 2024-11-23T17:29:44,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742255_1431 (size=30949) 2024-11-23T17:29:44,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742255_1431 (size=30949) 2024-11-23T17:29:44,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742256_1432 (size=1597347) 2024-11-23T17:29:44,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742256_1432 (size=1597347) 2024-11-23T17:29:44,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742256_1432 (size=1597347) 2024-11-23T17:29:44,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742257_1433 (size=4695811) 2024-11-23T17:29:44,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742257_1433 (size=4695811) 2024-11-23T17:29:44,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742257_1433 (size=4695811) 2024-11-23T17:29:44,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742258_1434 (size=232957) 2024-11-23T17:29:44,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742258_1434 (size=232957) 2024-11-23T17:29:44,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742258_1434 (size=232957) 2024-11-23T17:29:44,802 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742259_1435 (size=127628) 2024-11-23T17:29:44,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742259_1435 (size=127628) 2024-11-23T17:29:44,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742259_1435 (size=127628) 2024-11-23T17:29:44,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742260_1436 (size=20406) 2024-11-23T17:29:44,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742260_1436 (size=20406) 2024-11-23T17:29:44,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742260_1436 (size=20406) 2024-11-23T17:29:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742261_1437 (size=5175431) 2024-11-23T17:29:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742261_1437 (size=5175431) 2024-11-23T17:29:44,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742261_1437 (size=5175431) 2024-11-23T17:29:44,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742262_1438 (size=217634) 2024-11-23T17:29:44,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742262_1438 (size=217634) 2024-11-23T17:29:44,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742262_1438 (size=217634) 2024-11-23T17:29:44,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742263_1439 (size=1832290) 2024-11-23T17:29:44,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742263_1439 (size=1832290) 2024-11-23T17:29:44,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742263_1439 (size=1832290) 2024-11-23T17:29:44,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742264_1440 (size=322274) 2024-11-23T17:29:44,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742264_1440 (size=322274) 2024-11-23T17:29:44,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742264_1440 (size=322274) 2024-11-23T17:29:44,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742265_1441 (size=503880) 2024-11-23T17:29:44,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742265_1441 (size=503880) 2024-11-23T17:29:44,879 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742265_1441 (size=503880) 2024-11-23T17:29:44,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742266_1442 (size=440957) 2024-11-23T17:29:44,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742266_1442 (size=440957) 2024-11-23T17:29:44,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742266_1442 (size=440957) 2024-11-23T17:29:44,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742267_1443 (size=29229) 2024-11-23T17:29:44,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742267_1443 (size=29229) 2024-11-23T17:29:44,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742267_1443 (size=29229) 2024-11-23T17:29:44,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742268_1444 (size=24096) 2024-11-23T17:29:44,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742268_1444 (size=24096) 2024-11-23T17:29:44,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742268_1444 (size=24096) 2024-11-23T17:29:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742269_1445 (size=111872) 2024-11-23T17:29:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742269_1445 (size=111872) 2024-11-23T17:29:44,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742269_1445 (size=111872) 2024-11-23T17:29:44,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742270_1446 (size=45609) 2024-11-23T17:29:44,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742270_1446 (size=45609) 2024-11-23T17:29:44,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742270_1446 (size=45609) 2024-11-23T17:29:44,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742271_1447 (size=136454) 2024-11-23T17:29:44,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742271_1447 (size=136454) 2024-11-23T17:29:44,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742271_1447 (size=136454) 2024-11-23T17:29:44,930 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
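The TableMapReduceUtil entries above show the export staging its dependency jars before submitting the ExportSnapshot MapReduce job. As a rough sketch of how such an export is usually driven outside the test harness, the tool can be run through Hadoop's ToolRunner with the standard -snapshot and -copy-to options; the target URI below is a placeholder, and the exact constructor/option handling may differ between HBase versions, so treat this as illustrative only.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the CLI form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot emptySnaptb0-testEmptyExportFileSystemState -copy-to <target URI>
    int exit = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "-copy-to", "hdfs://<target-namenode>/<export-dir>"  // placeholder destination
    });
    System.exit(exit);
  }
}
```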
2024-11-23T17:29:44,932 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-23T17:29:44,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742272_1448 (size=7) 2024-11-23T17:29:44,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742272_1448 (size=7) 2024-11-23T17:29:44,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742272_1448 (size=7) 2024-11-23T17:29:44,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742273_1449 (size=10) 2024-11-23T17:29:44,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742273_1449 (size=10) 2024-11-23T17:29:44,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742273_1449 (size=10) 2024-11-23T17:29:44,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742274_1450 (size=303903) 2024-11-23T17:29:44,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742274_1450 (size=303903) 2024-11-23T17:29:44,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742274_1450 (size=303903) 2024-11-23T17:29:45,004 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:29:45,004 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:29:45,106 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0008_000001 (auth:SIMPLE) from 127.0.0.1:60500 2024-11-23T17:29:45,432 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b16ac99853753198964894fc0ca70344 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:29:45,432 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6f88863827b64acc564823a1044a6b2b changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:29:45,432 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 75de5a87a12fe52ded93c5475af83b5e changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:29:45,432 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5e511dfc454574bea858d074620b30f9 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:29:47,251 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:29:47,293 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 41264850465b123187f03a78b2521b39, had cached 0 bytes from a total of 5595 2024-11-23T17:29:49,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-23T17:29:49,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-23T17:29:49,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-23T17:29:50,186 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0008_000001 (auth:SIMPLE) from 127.0.0.1:37950 2024-11-23T17:29:50,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742275_1451 (size=349577) 2024-11-23T17:29:50,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742275_1451 (size=349577) 2024-11-23T17:29:50,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742275_1451 (size=349577) 2024-11-23T17:29:51,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742276_1452 (size=8568) 2024-11-23T17:29:51,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742276_1452 (size=8568) 2024-11-23T17:29:51,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742276_1452 (size=8568) 2024-11-23T17:29:51,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to 
blk_1073742277_1453 (size=460) 2024-11-23T17:29:51,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742277_1453 (size=460) 2024-11-23T17:29:51,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742277_1453 (size=460) 2024-11-23T17:29:51,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742278_1454 (size=8568) 2024-11-23T17:29:51,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742278_1454 (size=8568) 2024-11-23T17:29:51,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742278_1454 (size=8568) 2024-11-23T17:29:51,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742279_1455 (size=349577) 2024-11-23T17:29:51,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742279_1455 (size=349577) 2024-11-23T17:29:51,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742279_1455 (size=349577) 2024-11-23T17:29:53,097 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:29:53,144 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:29:53,150 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:53,150 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:29:53,151 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:29:53,151 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:53,151 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-23T17:29:53,151 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-23T17:29:53,151 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:53,152 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-23T17:29:53,152 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732382983396/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-23T17:29:53,157 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-23T17:29:53,160 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382993160"}]},"ts":"1732382993160"} 2024-11-23T17:29:53,161 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-23T17:29:53,161 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-23T17:29:53,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-23T17:29:53,163 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b16ac99853753198964894fc0ca70344, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75de5a87a12fe52ded93c5475af83b5e, UNASSIGN}] 2024-11-23T17:29:53,164 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75de5a87a12fe52ded93c5475af83b5e, UNASSIGN 2024-11-23T17:29:53,164 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b16ac99853753198964894fc0ca70344, UNASSIGN 2024-11-23T17:29:53,165 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=75de5a87a12fe52ded93c5475af83b5e, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:53,165 INFO 
[PEWorker-3 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=b16ac99853753198964894fc0ca70344, regionState=CLOSING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:29:53,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b16ac99853753198964894fc0ca70344, UNASSIGN because future has completed 2024-11-23T17:29:53,166 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:53,166 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure b16ac99853753198964894fc0ca70344, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:29:53,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75de5a87a12fe52ded93c5475af83b5e, UNASSIGN because future has completed 2024-11-23T17:29:53,167 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:29:53,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 75de5a87a12fe52ded93c5475af83b5e, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:29:53,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-23T17:29:53,319 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close b16ac99853753198964894fc0ca70344 2024-11-23T17:29:53,319 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:29:53,319 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing b16ac99853753198964894fc0ca70344, disabling compactions & flushes 2024-11-23T17:29:53,319 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:53,319 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:53,319 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 
after waiting 0 ms 2024-11-23T17:29:53,319 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 2024-11-23T17:29:53,319 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:53,320 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:29:53,320 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing 75de5a87a12fe52ded93c5475af83b5e, disabling compactions & flushes 2024-11-23T17:29:53,320 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:53,320 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:53,320 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. after waiting 0 ms 2024-11-23T17:29:53,320 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:53,322 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:29:53,322 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:29:53,323 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:53,323 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:29:53,323 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344. 
2024-11-23T17:29:53,323 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e. 2024-11-23T17:29:53,323 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for b16ac99853753198964894fc0ca70344: Waiting for close lock at 1732382993319Running coprocessor pre-close hooks at 1732382993319Disabling compacts and flushes for region at 1732382993319Disabling writes for close at 1732382993319Writing region close event to WAL at 1732382993320 (+1 ms)Running coprocessor post-close hooks at 1732382993323 (+3 ms)Closed at 1732382993323 2024-11-23T17:29:53,323 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for 75de5a87a12fe52ded93c5475af83b5e: Waiting for close lock at 1732382993320Running coprocessor pre-close hooks at 1732382993320Disabling compacts and flushes for region at 1732382993320Disabling writes for close at 1732382993320Writing region close event to WAL at 1732382993320Running coprocessor post-close hooks at 1732382993323 (+3 ms)Closed at 1732382993323 2024-11-23T17:29:53,325 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed 75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:53,325 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=75de5a87a12fe52ded93c5475af83b5e, regionState=CLOSED 2024-11-23T17:29:53,326 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed b16ac99853753198964894fc0ca70344 2024-11-23T17:29:53,326 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=b16ac99853753198964894fc0ca70344, regionState=CLOSED 2024-11-23T17:29:53,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 75de5a87a12fe52ded93c5475af83b5e, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:29:53,328 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure b16ac99853753198964894fc0ca70344, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:29:53,329 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=209 2024-11-23T17:29:53,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 75de5a87a12fe52ded93c5475af83b5e, server=215bb12e999f,44667,1732382620918 in 161 msec 2024-11-23T17:29:53,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=208 2024-11-23T17:29:53,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure b16ac99853753198964894fc0ca70344, server=215bb12e999f,41065,1732382621000 in 163 msec 2024-11-23T17:29:53,331 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, 
state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=75de5a87a12fe52ded93c5475af83b5e, UNASSIGN in 167 msec 2024-11-23T17:29:53,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=207 2024-11-23T17:29:53,332 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b16ac99853753198964894fc0ca70344, UNASSIGN in 167 msec 2024-11-23T17:29:53,334 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-11-23T17:29:53,334 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 170 msec 2024-11-23T17:29:53,335 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382993335"}]},"ts":"1732382993335"} 2024-11-23T17:29:53,336 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-23T17:29:53,336 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-23T17:29:53,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 179 msec 2024-11-23T17:29:53,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-23T17:29:53,476 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:29:53,476 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,478 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,479 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,481 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,482 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:53,482 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344 2024-11-23T17:29:53,483 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/recovered.edits] 2024-11-23T17:29:53,483 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/recovered.edits] 2024-11-23T17:29:53,529 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/cf/c4a39472a2a346aeb9bfcd500fb98284 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/cf/c4a39472a2a346aeb9bfcd500fb98284 2024-11-23T17:29:53,530 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/cf/fab8201ff62e4d9a9a203cca2084fa0e to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/cf/fab8201ff62e4d9a9a203cca2084fa0e 2024-11-23T17:29:53,531 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e/recovered.edits/9.seqid 2024-11-23T17:29:53,532 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/75de5a87a12fe52ded93c5475af83b5e 2024-11-23T17:29:53,532 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/recovered.edits/9.seqid to 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344/recovered.edits/9.seqid 2024-11-23T17:29:53,532 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testEmptyExportFileSystemState/b16ac99853753198964894fc0ca70344 2024-11-23T17:29:53,532 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-23T17:29:53,534 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,536 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-23T17:29:53,538 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-23T17:29:53,539 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,539 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-23T17:29:53,539 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382993539"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:53,539 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732382993539"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:53,541 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:29:53,541 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => b16ac99853753198964894fc0ca70344, NAME => 'testtb-testEmptyExportFileSystemState,,1732382982058.b16ac99853753198964894fc0ca70344.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 75de5a87a12fe52ded93c5475af83b5e, NAME => 'testtb-testEmptyExportFileSystemState,1,1732382982058.75de5a87a12fe52ded93c5475af83b5e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:29:53,541 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
2024-11-23T17:29:53,541 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732382993541"}]},"ts":"9223372036854775807"} 2024-11-23T17:29:53,542 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-23T17:29:53,543 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 67 msec 2024-11-23T17:29:53,576 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,577 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T17:29:53,577 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T17:29:53,577 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T17:29:53,577 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-23T17:29:53,587 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,587 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:53,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:53,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:53,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:53,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-11-23T17:29:53,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:53,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:53,588 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-23T17:29:53,588 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-23T17:29:53,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:53,588 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:53,593 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-23T17:29:53,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:53,596 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-23T17:29:53,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-23T17:29:53,617 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=819 (was 806) Potentially hanging thread: process reaper (pid 124632) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1176077031_1 at /127.0.0.1:49830 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:32909 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6703 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42625 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:32909 from appattempt_1732382632246_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:36019 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:37574 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:42625 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:54916 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:34416 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36019 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=820 (was 795) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=978 (was 986), ProcessCount=17 (was 17), AvailableMemoryMB=1872 (was 1994) 2024-11-23T17:29:53,617 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-11-23T17:29:53,633 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=819, OpenFileDescriptor=820, MaxFileDescriptor=1048576, SystemLoadAverage=978, ProcessCount=17, AvailableMemoryMB=1871 2024-11-23T17:29:53,634 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=819 is superior to 500 2024-11-23T17:29:53,635 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:29:53,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:29:53,637 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:29:53,637 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:53,637 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-11-23T17:29:53,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T17:29:53,638 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:29:53,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742280_1456 (size=404) 2024-11-23T17:29:53,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742280_1456 (size=404) 2024-11-23T17:29:53,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742280_1456 (size=404) 2024-11-23T17:29:53,647 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 28c8e9ee69c0ee746fd3bc8995d8ac70, NAME => 'testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 
'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:53,648 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 50adbd0da757ccb80f4e38c5185fe307, NAME => 'testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:53,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742281_1457 (size=65) 2024-11-23T17:29:53,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742281_1457 (size=65) 2024-11-23T17:29:53,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742281_1457 (size=65) 2024-11-23T17:29:53,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:53,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 28c8e9ee69c0ee746fd3bc8995d8ac70, disabling compactions & flushes 2024-11-23T17:29:53,655 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:53,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:53,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. after waiting 0 ms 2024-11-23T17:29:53,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:53,655 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 
2024-11-23T17:29:53,655 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 28c8e9ee69c0ee746fd3bc8995d8ac70: Waiting for close lock at 1732382993655Disabling compacts and flushes for region at 1732382993655Disabling writes for close at 1732382993655Writing region close event to WAL at 1732382993655Closed at 1732382993655 2024-11-23T17:29:53,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742282_1458 (size=65) 2024-11-23T17:29:53,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742282_1458 (size=65) 2024-11-23T17:29:53,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742282_1458 (size=65) 2024-11-23T17:29:53,662 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:53,662 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 50adbd0da757ccb80f4e38c5185fe307, disabling compactions & flushes 2024-11-23T17:29:53,662 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:29:53,662 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:29:53,663 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. after waiting 0 ms 2024-11-23T17:29:53,663 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:29:53,663 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 
2024-11-23T17:29:53,663 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 50adbd0da757ccb80f4e38c5185fe307: Waiting for close lock at 1732382993662Disabling compacts and flushes for region at 1732382993662Disabling writes for close at 1732382993663 (+1 ms)Writing region close event to WAL at 1732382993663Closed at 1732382993663 2024-11-23T17:29:53,664 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:29:53,664 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732382993664"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382993664"}]},"ts":"1732382993664"} 2024-11-23T17:29:53,664 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732382993664"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732382993664"}]},"ts":"1732382993664"} 2024-11-23T17:29:53,666 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-23T17:29:53,667 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:29:53,667 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382993667"}]},"ts":"1732382993667"} 2024-11-23T17:29:53,669 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-23T17:29:53,669 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:29:53,670 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:29:53,670 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:29:53,670 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:29:53,670 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:29:53,670 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:29:53,670 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:29:53,670 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:29:53,670 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:29:53,670 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:29:53,670 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:29:53,671 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=28c8e9ee69c0ee746fd3bc8995d8ac70, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=50adbd0da757ccb80f4e38c5185fe307, ASSIGN}] 2024-11-23T17:29:53,672 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=50adbd0da757ccb80f4e38c5185fe307, ASSIGN 2024-11-23T17:29:53,672 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=28c8e9ee69c0ee746fd3bc8995d8ac70, ASSIGN 2024-11-23T17:29:53,672 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=50adbd0da757ccb80f4e38c5185fe307, ASSIGN; state=OFFLINE, location=215bb12e999f,45467,1732382620632; forceNewPlan=false, retain=false 2024-11-23T17:29:53,672 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=28c8e9ee69c0ee746fd3bc8995d8ac70, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:29:53,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T17:29:53,822 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-23T17:29:53,823 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=50adbd0da757ccb80f4e38c5185fe307, regionState=OPENING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:53,823 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=28c8e9ee69c0ee746fd3bc8995d8ac70, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:53,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=28c8e9ee69c0ee746fd3bc8995d8ac70, ASSIGN because future has completed 2024-11-23T17:29:53,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:29:53,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=50adbd0da757ccb80f4e38c5185fe307, ASSIGN because future has completed 2024-11-23T17:29:53,825 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 50adbd0da757ccb80f4e38c5185fe307, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:29:53,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T17:29:53,979 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:53,979 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => 28c8e9ee69c0ee746fd3bc8995d8ac70, NAME => 'testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:29:53,979 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. service=AccessControlService 2024-11-23T17:29:53,979 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:29:53,979 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => 50adbd0da757ccb80f4e38c5185fe307, NAME => 'testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:29:53,979 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-23T17:29:53,979 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. service=AccessControlService 2024-11-23T17:29:53,979 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,979 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:53,980 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:29:53,980 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,980 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,980 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,980 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:29:53,980 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,980 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,981 INFO [StoreOpener-50adbd0da757ccb80f4e38c5185fe307-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,981 INFO [StoreOpener-28c8e9ee69c0ee746fd3bc8995d8ac70-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,982 INFO [StoreOpener-50adbd0da757ccb80f4e38c5185fe307-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 50adbd0da757ccb80f4e38c5185fe307 columnFamilyName cf 2024-11-23T17:29:53,982 INFO [StoreOpener-28c8e9ee69c0ee746fd3bc8995d8ac70-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 28c8e9ee69c0ee746fd3bc8995d8ac70 columnFamilyName cf 2024-11-23T17:29:53,982 DEBUG [StoreOpener-50adbd0da757ccb80f4e38c5185fe307-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:53,982 DEBUG [StoreOpener-28c8e9ee69c0ee746fd3bc8995d8ac70-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:29:53,982 INFO [StoreOpener-28c8e9ee69c0ee746fd3bc8995d8ac70-1 {}] regionserver.HStore(327): Store=28c8e9ee69c0ee746fd3bc8995d8ac70/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:53,982 INFO [StoreOpener-50adbd0da757ccb80f4e38c5185fe307-1 {}] regionserver.HStore(327): Store=50adbd0da757ccb80f4e38c5185fe307/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:29:53,983 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,983 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,983 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,983 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,983 DEBUG 
[RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,983 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,984 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,984 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,984 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,984 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,985 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,985 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,987 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:53,987 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:29:53,987 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened 50adbd0da757ccb80f4e38c5185fe307; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67974078, jitterRate=0.012892693281173706}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:53,987 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:53,988 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for 50adbd0da757ccb80f4e38c5185fe307: Running coprocessor pre-open hook at 1732382993980Writing region info on filesystem at 1732382993980Initializing all 
the Stores at 1732382993980Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382993981 (+1 ms)Cleaning up temporary data from old regions at 1732382993984 (+3 ms)Running coprocessor post-open hooks at 1732382993987 (+3 ms)Region opened successfully at 1732382993987 2024-11-23T17:29:53,988 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened 28c8e9ee69c0ee746fd3bc8995d8ac70; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63773214, jitterRate=-0.049705058336257935}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:29:53,988 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:53,988 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for 28c8e9ee69c0ee746fd3bc8995d8ac70: Running coprocessor pre-open hook at 1732382993980Writing region info on filesystem at 1732382993980Initializing all the Stores at 1732382993980Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732382993980Cleaning up temporary data from old regions at 1732382993984 (+4 ms)Running coprocessor post-open hooks at 1732382993988 (+4 ms)Region opened successfully at 1732382993988 2024-11-23T17:29:53,988 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70., pid=216, masterSystemTime=1732382993976 2024-11-23T17:29:53,988 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307., pid=217, masterSystemTime=1732382993977 2024-11-23T17:29:53,990 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:29:53,990 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 
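[editor's note] Once both OpenRegionProcedures report back (pid=216/217 above), every region of the table has a live location recorded in hbase:meta. A hedged sketch of the kind of client-side check that the "Waiting until all regions ... get assigned" step further down performs, using the public RegionLocator API rather than the test utility itself:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class CheckRegionsAssigned {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      // getAllRegionLocations reads hbase:meta, the same table the master just
      // updated with regionState=OPEN for both regions.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        if (loc.getServerName() == null) {
          throw new IllegalStateException("Unassigned region: "
              + loc.getRegion().getRegionNameAsString());
        }
      }
    }
  }
}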
2024-11-23T17:29:53,990 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=50adbd0da757ccb80f4e38c5185fe307, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:29:53,991 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:53,991 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:53,991 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=28c8e9ee69c0ee746fd3bc8995d8ac70, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:29:53,993 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure 50adbd0da757ccb80f4e38c5185fe307, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:29:53,994 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:29:53,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=215 2024-11-23T17:29:53,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure 50adbd0da757ccb80f4e38c5185fe307, server=215bb12e999f,45467,1732382620632 in 169 msec 2024-11-23T17:29:53,996 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=214 2024-11-23T17:29:53,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=50adbd0da757ccb80f4e38c5185fe307, ASSIGN in 324 msec 2024-11-23T17:29:53,996 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70, server=215bb12e999f,44667,1732382620918 in 171 msec 2024-11-23T17:29:53,998 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-11-23T17:29:53,998 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=28c8e9ee69c0ee746fd3bc8995d8ac70, ASSIGN in 325 msec 2024-11-23T17:29:53,998 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:29:53,999 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732382993999"}]},"ts":"1732382993999"} 2024-11-23T17:29:54,000 INFO 
[PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-23T17:29:54,001 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:29:54,001 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-23T17:29:54,004 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-23T17:29:54,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:54,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:54,029 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:54,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:29:54,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:54,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:54,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:54,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:54,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:54,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:54,119 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:54,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-23T17:29:54,120 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 484 msec 2024-11-23T17:29:54,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-11-23T17:29:54,266 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:29:54,266 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-11-23T17:29:54,266 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:54,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32829 bytes) of info 2024-11-23T17:29:54,270 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-11-23T17:29:54,271 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:54,271 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-11-23T17:29:54,271 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:29:54,273 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T17:29:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382994273 (current time:1732382994273). 
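[editor's note] The request logged just above ({ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }) is what the master receives when a client asks for a flush snapshot. A minimal sketch of issuing it through the Admin API (names copied from the log; an illustration, not the test's code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the SnapshotProcedure (pid=218 below) reaches SUCCESS;
      // SnapshotType.FLUSH matches the "type=FLUSH ttl=0" in the request above.
      admin.snapshot("emptySnaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"),
          SnapshotType.FLUSH);
    }
  }
}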
2024-11-23T17:29:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:29:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-23T17:29:54,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:54,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a1b8217, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:54,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:54,275 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:54,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:54,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:54,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30e20efd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:54,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:54,275 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,276 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46176, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:54,277 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@459cd7fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:54,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:54,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:54,279 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49586, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:54,281 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:54,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:54,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,281 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:29:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57ac9692, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:54,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:54,282 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:54,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:54,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:54,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@313100b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:54,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:54,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,284 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46200, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:54,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@989feab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:54,285 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:54,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:54,287 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49594, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:29:54,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:54,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:54,289 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49942, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:54,291 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:54,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:54,291 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-23T17:29:54,291 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:54,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:29:54,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T17:29:54,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-11-23T17:29:54,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-23T17:29:54,295 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:54,296 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:54,298 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:54,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742283_1459 (size=161) 2024-11-23T17:29:54,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742283_1459 (size=161) 2024-11-23T17:29:54,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742283_1459 (size=161) 2024-11-23T17:29:54,324 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:54,324 INFO [PEWorker-3 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 50adbd0da757ccb80f4e38c5185fe307}] 2024-11-23T17:29:54,325 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:54,325 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:54,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-23T17:29:54,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-11-23T17:29:54,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for 28c8e9ee69c0ee746fd3bc8995d8ac70: 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for 50adbd0da757ccb80f4e38c5185fe307: 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. for emptySnaptb0-testExportWithChecksum completed. 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. for emptySnaptb0-testExportWithChecksum completed. 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:29:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:29:54,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742285_1461 (size=68) 2024-11-23T17:29:54,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742285_1461 (size=68) 2024-11-23T17:29:54,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742285_1461 (size=68) 2024-11-23T17:29:54,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:54,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-11-23T17:29:54,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-11-23T17:29:54,494 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:54,494 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:54,496 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70 in 171 msec 2024-11-23T17:29:54,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742284_1460 (size=68) 2024-11-23T17:29:54,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742284_1460 (size=68) 2024-11-23T17:29:54,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742284_1460 (size=68) 2024-11-23T17:29:54,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 
2024-11-23T17:29:54,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-11-23T17:29:54,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-11-23T17:29:54,503 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:54,503 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:54,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-11-23T17:29:54,505 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:54,505 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 50adbd0da757ccb80f4e38c5185fe307 in 179 msec 2024-11-23T17:29:54,506 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:54,507 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:54,507 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-23T17:29:54,509 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-23T17:29:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742286_1462 (size=543) 2024-11-23T17:29:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742286_1462 (size=543) 2024-11-23T17:29:54,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742286_1462 (size=543) 2024-11-23T17:29:54,549 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:54,553 INFO 
[PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:54,553 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-23T17:29:54,555 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:54,555 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-11-23T17:29:54,557 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 263 msec 2024-11-23T17:29:54,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-11-23T17:29:54,616 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:29:54,619 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='0411fe39cf9765d2214132a39809f4062', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:29:54,620 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='1c87ff4bf2cf535595e5b860c36b8d965', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:54,621 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='2a9f29604d4f7943d2faadfef56e1b61b', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:54,622 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='3f3418f255511c9bed1047984dd00e1bc', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307., hostname=215bb12e999f,45467,1732382620632, seqNum=2] 2024-11-23T17:29:54,626 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:54,628 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45467 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:29:54,629 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:29:54,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-23T17:29:54,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:54,632 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:29:54,633 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:29:54,638 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:29:54,649 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-23T17:29:54,655 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T17:29:54,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732382994655 (current time:1732382994655). 
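[editor's note] The two "writing data to region ... with WAL disabled" entries above are the region servers warning about puts applied with durability SKIP_WAL before the second snapshot (snaptb0-testExportWithChecksum) is requested. A hedged sketch of a write that produces that warning (row, qualifier and value are placeholders, not the test's data):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteWithoutWal {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the "Data may be lost in the event of a crash"
      // warning seen in the region server log above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}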
2024-11-23T17:29:54,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:29:54,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-23T17:29:54,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:29:54,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12b8fb08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:54,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:54,665 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:54,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:54,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:54,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@174d3138, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:54,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:54,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,667 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46210, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:54,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b919a61, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:54,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:54,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:54,670 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49604, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:54,671 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:54,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:54,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,672 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:29:54,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36b49b08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:29:54,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:29:54,673 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:29:54,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:29:54,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:29:54,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a3a3e9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:29:54,674 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:29:54,675 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46228, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:29:54,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b364e5a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:29:54,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:29:54,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:29:54,678 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:54,679 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49608, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:29:54,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:29:54,681 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:29:54,682 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49954, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:29:54,684 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621. 2024-11-23T17:29:54,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:29:54,684 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:29:54,685 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:29:54,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-23T17:29:54,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:29:54,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-23T17:29:54,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-23T17:29:54,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T17:29:54,689 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:29:54,690 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:29:54,695 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:29:54,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742287_1463 (size=156) 2024-11-23T17:29:54,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742287_1463 (size=156) 2024-11-23T17:29:54,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742287_1463 (size=156) 2024-11-23T17:29:54,715 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:29:54,715 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 50adbd0da757ccb80f4e38c5185fe307}] 2024-11-23T17:29:54,716 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:54,716 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:54,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T17:29:54,867 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45467 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-11-23T17:29:54,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:29:54,868 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-11-23T17:29:54,868 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 
2024-11-23T17:29:54,868 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing 28c8e9ee69c0ee746fd3bc8995d8ac70 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-23T17:29:54,868 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing 50adbd0da757ccb80f4e38c5185fe307 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-23T17:29:54,883 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/.tmp/cf/9ddace22423e4c5fb0f9929467a7a911 is 71, key is 00de0e1b0a318cdcdce0fd56b336cc6d/cf:q/1732382994626/Put/seqid=0 2024-11-23T17:29:54,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742288_1464 (size=5356) 2024-11-23T17:29:54,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/.tmp/cf/292980e15066421aac062dce81d24959 is 71, key is 181f742844e7544ad609bdb95e629516/cf:q/1732382994628/Put/seqid=0 2024-11-23T17:29:54,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742288_1464 (size=5356) 2024-11-23T17:29:54,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742288_1464 (size=5356) 2024-11-23T17:29:54,895 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/.tmp/cf/9ddace22423e4c5fb0f9929467a7a911 2024-11-23T17:29:54,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742289_1465 (size=8256) 2024-11-23T17:29:54,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742289_1465 (size=8256) 2024-11-23T17:29:54,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742289_1465 (size=8256) 2024-11-23T17:29:54,901 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/.tmp/cf/292980e15066421aac062dce81d24959 2024-11-23T17:29:54,902 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/.tmp/cf/9ddace22423e4c5fb0f9929467a7a911 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911 2024-11-23T17:29:54,906 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/.tmp/cf/292980e15066421aac062dce81d24959 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959 2024-11-23T17:29:54,909 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911, entries=4, sequenceid=6, filesize=5.2 K 2024-11-23T17:29:54,909 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959, entries=46, sequenceid=6, filesize=8.1 K 2024-11-23T17:29:54,910 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 28c8e9ee69c0ee746fd3bc8995d8ac70 in 42ms, sequenceid=6, compaction requested=false 2024-11-23T17:29:54,910 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 50adbd0da757ccb80f4e38c5185fe307 in 42ms, sequenceid=6, compaction requested=false 2024-11-23T17:29:54,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-23T17:29:54,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 28c8e9ee69c0ee746fd3bc8995d8ac70: 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 50adbd0da757ccb80f4e38c5185fe307: 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. for snaptb0-testExportWithChecksum completed. 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. for snaptb0-testExportWithChecksum completed. 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911] hfiles 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959] hfiles 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911 for snapshot=snaptb0-testExportWithChecksum 2024-11-23T17:29:54,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959 for snapshot=snaptb0-testExportWithChecksum 2024-11-23T17:29:54,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742290_1466 (size=107) 2024-11-23T17:29:54,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742291_1467 (size=107) 2024-11-23T17:29:54,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45743 is added to blk_1073742291_1467 (size=107) 2024-11-23T17:29:54,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742290_1466 (size=107) 2024-11-23T17:29:54,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742290_1466 (size=107) 2024-11-23T17:29:54,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742291_1467 (size=107) 2024-11-23T17:29:54,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:29:54,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-11-23T17:29:54,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:29:54,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-11-23T17:29:54,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-11-23T17:29:54,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-11-23T17:29:54,918 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:54,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:54,918 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:29:54,918 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:29:54,920 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70 in 204 msec 2024-11-23T17:29:54,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=221 2024-11-23T17:29:54,921 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:29:54,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, 
state=SUCCESS, hasLock=false; SnapshotRegionProcedure 50adbd0da757ccb80f4e38c5185fe307 in 204 msec 2024-11-23T17:29:54,922 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:29:54,922 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:29:54,922 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-23T17:29:54,923 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:29:54,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742292_1468 (size=621) 2024-11-23T17:29:54,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742292_1468 (size=621) 2024-11-23T17:29:54,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742292_1468 (size=621) 2024-11-23T17:29:54,931 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:29:54,940 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:29:54,940 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-23T17:29:54,942 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:29:54,942 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-23T17:29:54,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 256 msec 2024-11-23T17:29:55,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-23T17:29:55,007 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:29:55,007 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007 2024-11-23T17:29:55,007 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:55,035 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:29:55,035 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@ff95ab9, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:29:55,036 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-11-23T17:29:55,039 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:29:55,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:55,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:55,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:55,309 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:29:56,104 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-8324971761374864687.jar 2024-11-23T17:29:56,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:56,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:56,178 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-2268881381674583387.jar 2024-11-23T17:29:56,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:56,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:56,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 
2024-11-23T17:29:56,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:56,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:56,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:29:56,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:29:56,181 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:29:56,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:29:56,182 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:29:56,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:29:56,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:29:56,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:29:56,183 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:29:56,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:29:56,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:29:56,184 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:29:56,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:56,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:56,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:29:56,185 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:56,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:29:56,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:29:56,186 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:29:56,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742293_1469 (size=131440) 2024-11-23T17:29:56,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742293_1469 (size=131440) 2024-11-23T17:29:56,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742293_1469 (size=131440) 2024-11-23T17:29:56,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45743 is added to blk_1073742294_1470 (size=4188619) 2024-11-23T17:29:56,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742294_1470 (size=4188619) 2024-11-23T17:29:56,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742294_1470 (size=4188619) 2024-11-23T17:29:56,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742295_1471 (size=1323991) 2024-11-23T17:29:56,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742295_1471 (size=1323991) 2024-11-23T17:29:56,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742295_1471 (size=1323991) 2024-11-23T17:29:57,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742296_1472 (size=903733) 2024-11-23T17:29:57,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742296_1472 (size=903733) 2024-11-23T17:29:57,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742296_1472 (size=903733) 2024-11-23T17:29:57,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742297_1473 (size=8360083) 2024-11-23T17:29:57,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742297_1473 (size=8360083) 2024-11-23T17:29:57,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742297_1473 (size=8360083) 2024-11-23T17:29:57,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742298_1474 (size=1877034) 2024-11-23T17:29:57,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742298_1474 (size=1877034) 2024-11-23T17:29:57,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742298_1474 (size=1877034) 2024-11-23T17:29:57,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742299_1475 (size=77835) 2024-11-23T17:29:57,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742299_1475 (size=77835) 2024-11-23T17:29:57,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742299_1475 (size=77835) 2024-11-23T17:29:57,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742300_1476 (size=30949) 2024-11-23T17:29:57,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742300_1476 (size=30949) 2024-11-23T17:29:57,388 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742300_1476 (size=30949) 2024-11-23T17:29:57,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742301_1477 (size=1597347) 2024-11-23T17:29:57,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742301_1477 (size=1597347) 2024-11-23T17:29:57,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742301_1477 (size=1597347) 2024-11-23T17:29:57,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742302_1478 (size=4695811) 2024-11-23T17:29:57,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742302_1478 (size=4695811) 2024-11-23T17:29:57,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742302_1478 (size=4695811) 2024-11-23T17:29:57,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742303_1479 (size=232957) 2024-11-23T17:29:57,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742303_1479 (size=232957) 2024-11-23T17:29:57,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742303_1479 (size=232957) 2024-11-23T17:29:57,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742304_1480 (size=127628) 2024-11-23T17:29:57,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742304_1480 (size=127628) 2024-11-23T17:29:57,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742304_1480 (size=127628) 2024-11-23T17:29:57,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742305_1481 (size=20406) 2024-11-23T17:29:57,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742305_1481 (size=20406) 2024-11-23T17:29:57,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742305_1481 (size=20406) 2024-11-23T17:29:57,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742306_1482 (size=5175431) 2024-11-23T17:29:57,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742306_1482 (size=5175431) 2024-11-23T17:29:57,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742306_1482 (size=5175431) 2024-11-23T17:29:57,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742307_1483 (size=217634) 2024-11-23T17:29:57,522 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742307_1483 (size=217634) 2024-11-23T17:29:57,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742307_1483 (size=217634) 2024-11-23T17:29:57,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742308_1484 (size=6424742) 2024-11-23T17:29:57,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742308_1484 (size=6424742) 2024-11-23T17:29:57,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742308_1484 (size=6424742) 2024-11-23T17:29:57,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742309_1485 (size=1832290) 2024-11-23T17:29:57,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742309_1485 (size=1832290) 2024-11-23T17:29:57,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742309_1485 (size=1832290) 2024-11-23T17:29:57,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742310_1486 (size=322274) 2024-11-23T17:29:57,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742310_1486 (size=322274) 2024-11-23T17:29:57,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742310_1486 (size=322274) 2024-11-23T17:29:57,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742311_1487 (size=503880) 2024-11-23T17:29:57,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742311_1487 (size=503880) 2024-11-23T17:29:57,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742311_1487 (size=503880) 2024-11-23T17:29:57,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742312_1488 (size=29229) 2024-11-23T17:29:57,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742312_1488 (size=29229) 2024-11-23T17:29:57,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742312_1488 (size=29229) 2024-11-23T17:29:57,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742313_1489 (size=24096) 2024-11-23T17:29:57,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742313_1489 (size=24096) 2024-11-23T17:29:57,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742313_1489 (size=24096) 2024-11-23T17:29:57,785 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742314_1490 (size=440957) 2024-11-23T17:29:57,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742314_1490 (size=440957) 2024-11-23T17:29:57,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742314_1490 (size=440957) 2024-11-23T17:29:57,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742315_1491 (size=111872) 2024-11-23T17:29:57,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742315_1491 (size=111872) 2024-11-23T17:29:57,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742315_1491 (size=111872) 2024-11-23T17:29:57,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742316_1492 (size=45609) 2024-11-23T17:29:57,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742316_1492 (size=45609) 2024-11-23T17:29:57,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742316_1492 (size=45609) 2024-11-23T17:29:57,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742317_1493 (size=136454) 2024-11-23T17:29:57,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742317_1493 (size=136454) 2024-11-23T17:29:57,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742317_1493 (size=136454) 2024-11-23T17:29:57,810 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-23T17:29:57,812 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-23T17:29:57,814 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-11-23T17:29:57,814 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-11-23T17:29:57,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742318_1494 (size=441) 2024-11-23T17:29:57,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742318_1494 (size=441) 2024-11-23T17:29:57,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742318_1494 (size=441) 2024-11-23T17:29:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742319_1495 (size=21) 2024-11-23T17:29:57,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742319_1495 (size=21) 2024-11-23T17:29:57,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742319_1495 (size=21) 2024-11-23T17:29:57,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742320_1496 (size=304044) 2024-11-23T17:29:57,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742320_1496 (size=304044) 2024-11-23T17:29:57,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742320_1496 (size=304044) 2024-11-23T17:29:58,037 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:29:58,037 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:29:58,040 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0008_000001 (auth:SIMPLE) from 127.0.0.1:46464 2024-11-23T17:29:58,056 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0008/container_1732382632246_0008_01_000001/launch_container.sh] 2024-11-23T17:29:58,056 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0008/container_1732382632246_0008_01_000001/container_tokens] 2024-11-23T17:29:58,056 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0008/container_1732382632246_0008_01_000001/sysfs] 2024-11-23T17:29:58,325 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:34160 2024-11-23T17:29:59,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-23T17:29:59,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-23T17:29:59,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-23T17:30:05,310 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:30:06,812 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:57514 2024-11-23T17:30:07,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742321_1497 (size=349742) 2024-11-23T17:30:07,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742321_1497 (size=349742) 2024-11-23T17:30:07,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742321_1497 (size=349742) 2024-11-23T17:30:08,091 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
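
A few entries back, the AbstractLeafQueue warnings flag the capacity scheduler's maximum-am-resource-percent as too low to fit a single ApplicationMaster. A minimal sketch of raising that share programmatically follows; the property name is the standard YARN one referenced by the warning, while the 0.5 value and the standalone driver are illustrative assumptions, not part of this test run.

```java
// Hedged sketch, not taken from this test run: raising the capacity
// scheduler's AM resource share so at least one ApplicationMaster fits.
// The property name matches the warning above; the value is illustrative.
import org.apache.hadoop.conf.Configuration;

public class RaiseAmResourcePercent {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Default is 0.1; the warning fires when this share cannot hold one AM.
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
    System.out.println(
        conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
  }
}
```
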
2024-11-23T17:30:09,328 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:41240 2024-11-23T17:30:09,329 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:35670 2024-11-23T17:30:14,761 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000003/launch_container.sh] 2024-11-23T17:30:14,761 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000003/container_tokens] 2024-11-23T17:30:14,761 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007/archive/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:30:15,555 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000002/launch_container.sh] 2024-11-23T17:30:15,555 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000002/container_tokens] 2024-11-23T17:30:15,555 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000002/sysfs] 2024-11-23T17:30:15,877 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5e511dfc454574bea858d074620b30f9, had cached 0 bytes from a total of 8190 2024-11-23T17:30:15,877 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6f88863827b64acc564823a1044a6b2b, had cached 0 bytes from a total of 5422 2024-11-23T17:30:15,974 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:36660 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007/archive/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. 
You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:30:16,981 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:36666 2024-11-23T17:30:18,706 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732382632246_0009_01_000006 while processing FINISH_CONTAINERS event 2024-11-23T17:30:19,543 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732382632246_0009_01_000007 while processing FINISH_CONTAINERS event 2024-11-23T17:30:19,593 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000004/launch_container.sh] 2024-11-23T17:30:19,593 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000004/container_tokens] 2024-11-23T17:30:19,593 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007/archive/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:30:20,581 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000005/launch_container.sh] 2024-11-23T17:30:20,582 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000005/container_tokens] 2024-11-23T17:30:20,582 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_2/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000005/sysfs] 2024-11-23T17:30:20,995 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:36680 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007/archive/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:30:22,021 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:36810 2024-11-23T17:30:23,708 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732382632246_0009_01_000010 while processing FINISH_CONTAINERS event 2024-11-23T17:30:24,569 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732382632246_0009_01_000011 while processing FINISH_CONTAINERS event Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007/archive/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:30:27,977 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000009/launch_container.sh] 2024-11-23T17:30:27,977 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000009/container_tokens] 2024-11-23T17:30:27,977 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000009/sysfs] 2024-11-23T17:30:28,031 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:36822 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/local-export-1732382995007/archive/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-23T17:30:29,048 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:47438 2024-11-23T17:30:30,961 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732382632246_0009_01_000014 while processing FINISH_CONTAINERS event 2024-11-23T17:30:31,380 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000008/launch_container.sh] 2024-11-23T17:30:31,380 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000008/container_tokens] 2024-11-23T17:30:31,380 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_3/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000008/sysfs] 2024-11-23T17:30:32,293 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 41264850465b123187f03a78b2521b39, had cached 0 bytes from a total of 5595 2024-11-23T17:30:32,711 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732382632246_0009_01_000015 while processing FINISH_CONTAINERS event 2024-11-23T17:30:35,118 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:46788 2024-11-23T17:30:35,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742322_1498 (size=30173) 2024-11-23T17:30:35,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742322_1498 (size=30173) 
2024-11-23T17:30:35,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742322_1498 (size=30173) 2024-11-23T17:30:35,280 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732382632246_0009_01_000013 is : 143 2024-11-23T17:30:35,357 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000013/launch_container.sh] 2024-11-23T17:30:35,357 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000013/container_tokens] 2024-11-23T17:30:35,362 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000013/sysfs] 2024-11-23T17:30:35,388 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000012/launch_container.sh] 2024-11-23T17:30:35,388 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000012/container_tokens] 2024-11-23T17:30:35,388 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000012/sysfs] 2024-11-23T17:30:35,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742323_1499 (size=460) 2024-11-23T17:30:35,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742323_1499 (size=460) 2024-11-23T17:30:35,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742323_1499 (size=460) 2024-11-23T17:30:35,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to 
blk_1073742324_1500 (size=30173) 2024-11-23T17:30:35,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742324_1500 (size=30173) 2024-11-23T17:30:35,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742324_1500 (size=30173) 2024-11-23T17:30:35,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742325_1501 (size=349742) 2024-11-23T17:30:35,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742325_1501 (size=349742) 2024-11-23T17:30:35,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742325_1501 (size=349742) 2024-11-23T17:30:37,448 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1732382632246_0009_m_000001 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
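
The repeated Checksum mismatch IOExceptions above, and the final ExportSnapshotException, come from verifying copies between HDFS and a local file:// target whose checksum algorithms differ. A minimal sketch of re-running the export with the remedies named in the error message follows; the snapshot name is taken from the log, the ToolRunner entry point mirrors the stack trace, but the copy-to path is a placeholder and the option spellings follow the message and the documented ExportSnapshot usage.

```java
// Hedged sketch, not part of this test: invoking ExportSnapshot via
// ToolRunner (as the stack trace above does) with the remedies the error
// message suggests. The copy-to path is an illustrative placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotChecksumOptions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Remedy 1: file-level composite CRCs, so HDFS and file:// can be compared.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"     // placeholder target
        // Alternative remedy: add "-no-checksum-verify" to skip checksum
        // checks entirely (risks masking corruption, as the message warns).
    });
    System.exit(rc);
  }
}
```
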
2024-11-23T17:30:37,450 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449 2024-11-23T17:30:37,450 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37605, tgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449, rawTgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:30:37,522 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:30:37,522 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:30:37,568 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:30:37,605 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-23T17:30:37,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742326_1502 (size=156) 2024-11-23T17:30:37,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742326_1502 (size=156) 2024-11-23T17:30:37,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742326_1502 (size=156) 2024-11-23T17:30:37,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742327_1503 (size=621) 2024-11-23T17:30:37,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742327_1503 (size=621) 2024-11-23T17:30:37,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742327_1503 (size=621) 2024-11-23T17:30:38,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:38,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:38,089 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:38,092 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:30:38,980 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 28c8e9ee69c0ee746fd3bc8995d8ac70, had cached 0 bytes from a total of 5356 2024-11-23T17:30:38,980 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 50adbd0da757ccb80f4e38c5185fe307, had cached 0 bytes from a total of 8256 2024-11-23T17:30:39,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-8741749214606277458.jar 2024-11-23T17:30:39,422 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:39,423 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:39,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-11002502581069694972.jar 2024-11-23T17:30:39,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:39,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:39,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:39,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:39,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using 
jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:39,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:30:39,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:30:39,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:30:39,512 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:30:39,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:30:39,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:30:39,513 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:30:39,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:30:39,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:30:39,514 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:30:39,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 
2024-11-23T17:30:39,515 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:30:39,516 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:30:39,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:30:39,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:30:39,517 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:30:39,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:30:39,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:30:39,518 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:30:39,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742328_1504 (size=131440) 2024-11-23T17:30:39,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742328_1504 (size=131440) 2024-11-23T17:30:39,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742328_1504 (size=131440) 2024-11-23T17:30:39,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742329_1505 (size=4188619) 2024-11-23T17:30:39,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742329_1505 (size=4188619) 2024-11-23T17:30:39,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742329_1505 (size=4188619) 2024-11-23T17:30:39,704 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742330_1506 (size=1323991) 2024-11-23T17:30:39,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742330_1506 (size=1323991) 2024-11-23T17:30:39,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742330_1506 (size=1323991) 2024-11-23T17:30:39,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742331_1507 (size=903733) 2024-11-23T17:30:39,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742331_1507 (size=903733) 2024-11-23T17:30:39,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742331_1507 (size=903733) 2024-11-23T17:30:39,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742332_1508 (size=8360083) 2024-11-23T17:30:39,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742332_1508 (size=8360083) 2024-11-23T17:30:39,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742332_1508 (size=8360083) 2024-11-23T17:30:39,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742333_1509 (size=6424742) 2024-11-23T17:30:39,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742333_1509 (size=6424742) 2024-11-23T17:30:39,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742333_1509 (size=6424742) 2024-11-23T17:30:39,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742334_1510 (size=1877034) 2024-11-23T17:30:39,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742334_1510 (size=1877034) 2024-11-23T17:30:39,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742334_1510 (size=1877034) 2024-11-23T17:30:39,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742335_1511 (size=77835) 2024-11-23T17:30:39,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742335_1511 (size=77835) 2024-11-23T17:30:39,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742335_1511 (size=77835) 2024-11-23T17:30:39,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742336_1512 (size=30949) 2024-11-23T17:30:39,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742336_1512 (size=30949) 2024-11-23T17:30:39,972 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742336_1512 (size=30949) 2024-11-23T17:30:39,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742337_1513 (size=1597347) 2024-11-23T17:30:39,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742337_1513 (size=1597347) 2024-11-23T17:30:39,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742337_1513 (size=1597347) 2024-11-23T17:30:40,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742338_1514 (size=4695811) 2024-11-23T17:30:40,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742338_1514 (size=4695811) 2024-11-23T17:30:40,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742338_1514 (size=4695811) 2024-11-23T17:30:40,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742339_1515 (size=232957) 2024-11-23T17:30:40,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742339_1515 (size=232957) 2024-11-23T17:30:40,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742339_1515 (size=232957) 2024-11-23T17:30:40,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742340_1516 (size=127628) 2024-11-23T17:30:40,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742340_1516 (size=127628) 2024-11-23T17:30:40,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742340_1516 (size=127628) 2024-11-23T17:30:40,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742341_1517 (size=20406) 2024-11-23T17:30:40,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742341_1517 (size=20406) 2024-11-23T17:30:40,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742341_1517 (size=20406) 2024-11-23T17:30:40,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742342_1518 (size=5175431) 2024-11-23T17:30:40,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742342_1518 (size=5175431) 2024-11-23T17:30:40,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742342_1518 (size=5175431) 2024-11-23T17:30:40,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742343_1519 (size=440957) 2024-11-23T17:30:40,700 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742343_1519 (size=440957) 2024-11-23T17:30:40,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742343_1519 (size=440957) 2024-11-23T17:30:41,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742344_1520 (size=217634) 2024-11-23T17:30:41,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742344_1520 (size=217634) 2024-11-23T17:30:41,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742344_1520 (size=217634) 2024-11-23T17:30:41,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742345_1521 (size=1832290) 2024-11-23T17:30:41,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742345_1521 (size=1832290) 2024-11-23T17:30:41,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742345_1521 (size=1832290) 2024-11-23T17:30:41,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742346_1522 (size=322274) 2024-11-23T17:30:41,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742346_1522 (size=322274) 2024-11-23T17:30:41,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742346_1522 (size=322274) 2024-11-23T17:30:41,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742347_1523 (size=503880) 2024-11-23T17:30:41,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742347_1523 (size=503880) 2024-11-23T17:30:41,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742347_1523 (size=503880) 2024-11-23T17:30:42,022 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0009_000001 (auth:SIMPLE) from 127.0.0.1:36190 2024-11-23T17:30:42,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742348_1524 (size=29229) 2024-11-23T17:30:42,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742348_1524 (size=29229) 2024-11-23T17:30:42,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742348_1524 (size=29229) 2024-11-23T17:30:42,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742349_1525 (size=24096) 2024-11-23T17:30:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742349_1525 (size=24096) 
2024-11-23T17:30:42,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742349_1525 (size=24096) 2024-11-23T17:30:42,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742350_1526 (size=111872) 2024-11-23T17:30:42,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742350_1526 (size=111872) 2024-11-23T17:30:42,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742350_1526 (size=111872) 2024-11-23T17:30:42,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742351_1527 (size=45609) 2024-11-23T17:30:42,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742351_1527 (size=45609) 2024-11-23T17:30:42,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742351_1527 (size=45609) 2024-11-23T17:30:42,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742352_1528 (size=136454) 2024-11-23T17:30:42,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742352_1528 (size=136454) 2024-11-23T17:30:42,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742352_1528 (size=136454) 2024-11-23T17:30:42,865 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-23T17:30:42,889 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-23T17:30:42,899 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-11-23T17:30:42,899 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-11-23T17:30:43,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742353_1529 (size=441) 2024-11-23T17:30:43,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742353_1529 (size=441) 2024-11-23T17:30:43,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742353_1529 (size=441) 2024-11-23T17:30:43,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742354_1530 (size=21) 2024-11-23T17:30:43,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742354_1530 (size=21) 2024-11-23T17:30:43,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742354_1530 (size=21) 2024-11-23T17:30:43,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742355_1531 (size=303998) 2024-11-23T17:30:43,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742355_1531 (size=303998) 2024-11-23T17:30:43,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742355_1531 (size=303998) 2024-11-23T17:30:43,271 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:30:43,272 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:30:43,329 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0010_000001 (auth:SIMPLE) from 127.0.0.1:34844 2024-11-23T17:30:45,528 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 28c8e9ee69c0ee746fd3bc8995d8ac70 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:30:45,528 DEBUG [master/215bb12e999f:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 50adbd0da757ccb80f4e38c5185fe307 changed from -1.0 to 0.0, refreshing cache 2024-11-23T17:30:47,161 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000001/launch_container.sh] 2024-11-23T17:30:47,162 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000001/container_tokens] 2024-11-23T17:30:47,162 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0009/container_1732382632246_0009_01_000001/sysfs] 2024-11-23T17:30:54,562 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0010_000001 (auth:SIMPLE) from 127.0.0.1:55822 2024-11-23T17:30:55,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742356_1532 (size=349696) 2024-11-23T17:30:55,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742356_1532 (size=349696) 2024-11-23T17:30:55,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742356_1532 (size=349696) 2024-11-23T17:30:57,105 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0010_000001 (auth:SIMPLE) from 127.0.0.1:38128 2024-11-23T17:30:57,109 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0010_000001 (auth:SIMPLE) from 127.0.0.1:50330 2024-11-23T17:31:00,877 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5e511dfc454574bea858d074620b30f9, had cached 0 bytes from a total of 8190 2024-11-23T17:31:00,877 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6f88863827b64acc564823a1044a6b2b, had cached 0 bytes from a total of 5422 2024-11-23T17:31:01,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is 
added to blk_1073742357_1533 (size=5356) 2024-11-23T17:31:01,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742357_1533 (size=5356) 2024-11-23T17:31:01,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742357_1533 (size=5356) 2024-11-23T17:31:02,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742359_1535 (size=8256) 2024-11-23T17:31:02,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742359_1535 (size=8256) 2024-11-23T17:31:02,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742359_1535 (size=8256) 2024-11-23T17:31:02,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742358_1534 (size=22153) 2024-11-23T17:31:02,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742358_1534 (size=22153) 2024-11-23T17:31:02,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742358_1534 (size=22153) 2024-11-23T17:31:02,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742360_1536 (size=462) 2024-11-23T17:31:02,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742360_1536 (size=462) 2024-11-23T17:31:02,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742360_1536 (size=462) 2024-11-23T17:31:02,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742361_1537 (size=22153) 2024-11-23T17:31:02,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742361_1537 (size=22153) 2024-11-23T17:31:02,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742361_1537 (size=22153) 2024-11-23T17:31:02,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742362_1538 (size=349696) 2024-11-23T17:31:02,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742362_1538 (size=349696) 2024-11-23T17:31:02,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742362_1538 (size=349696) 2024-11-23T17:31:02,193 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0010_000001 (auth:SIMPLE) from 127.0.0.1:46776 2024-11-23T17:31:02,205 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732382632246_0010_01_000002 is : 143 2024-11-23T17:31:02,214 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_1/usercache/jenkins/appcache/application_1732382632246_0010/container_1732382632246_0010_01_000002/launch_container.sh] 2024-11-23T17:31:02,214 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_1/usercache/jenkins/appcache/application_1732382632246_0010/container_1732382632246_0010_01_000002/container_tokens] 2024-11-23T17:31:02,214 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_1/usercache/jenkins/appcache/application_1732382632246_0010/container_1732382632246_0010_01_000002/sysfs] 2024-11-23T17:31:03,471 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:31:03,472 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-23T17:31:03,476 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-11-23T17:31:03,476 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:31:03,477 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:31:03,477 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-23T17:31:03,477 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-23T17:31:03,477 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-23T17:31:03,477 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-23T17:31:03,477 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-23T17:31:03,478 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383037449/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-23T17:31:03,482 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-23T17:31:03,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:31:03,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-11-23T17:31:03,486 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732383063486"}]},"ts":"1732383063486"} 2024-11-23T17:31:03,488 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-23T17:31:03,488 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-23T17:31:03,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-23T17:31:03,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=28c8e9ee69c0ee746fd3bc8995d8ac70, UNASSIGN}, {pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=50adbd0da757ccb80f4e38c5185fe307, UNASSIGN}] 2024-11-23T17:31:03,491 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=50adbd0da757ccb80f4e38c5185fe307, UNASSIGN 2024-11-23T17:31:03,491 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=28c8e9ee69c0ee746fd3bc8995d8ac70, UNASSIGN 2024-11-23T17:31:03,491 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=28c8e9ee69c0ee746fd3bc8995d8ac70, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:31:03,491 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=50adbd0da757ccb80f4e38c5185fe307, regionState=CLOSING, regionLocation=215bb12e999f,45467,1732382620632 2024-11-23T17:31:03,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=50adbd0da757ccb80f4e38c5185fe307, UNASSIGN because future has completed 2024-11-23T17:31:03,493 DEBUG [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:31:03,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 50adbd0da757ccb80f4e38c5185fe307, server=215bb12e999f,45467,1732382620632}] 2024-11-23T17:31:03,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=28c8e9ee69c0ee746fd3bc8995d8ac70, UNASSIGN because future has completed 2024-11-23T17:31:03,494 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:31:03,494 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:31:03,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-11-23T17:31:03,647 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(122): Close 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:31:03,647 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:31:03,648 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(122): Close 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:31:03,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:31:03,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1722): Closing 28c8e9ee69c0ee746fd3bc8995d8ac70, disabling compactions & flushes 2024-11-23T17:31:03,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing 50adbd0da757ccb80f4e38c5185fe307, disabling compactions & flushes 2024-11-23T17:31:03,648 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:31:03,648 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:31:03,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 
2024-11-23T17:31:03,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:31:03,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. after waiting 0 ms 2024-11-23T17:31:03,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. after waiting 0 ms 2024-11-23T17:31:03,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:31:03,648 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 2024-11-23T17:31:03,656 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:31:03,656 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:31:03,656 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:31:03,656 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:31:03,656 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70. 2024-11-23T17:31:03,656 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307. 
2024-11-23T17:31:03,656 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1676): Region close journal for 28c8e9ee69c0ee746fd3bc8995d8ac70: Waiting for close lock at 1732383063648Running coprocessor pre-close hooks at 1732383063648Disabling compacts and flushes for region at 1732383063648Disabling writes for close at 1732383063648Writing region close event to WAL at 1732383063651 (+3 ms)Running coprocessor post-close hooks at 1732383063656 (+5 ms)Closed at 1732383063656 2024-11-23T17:31:03,656 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for 50adbd0da757ccb80f4e38c5185fe307: Waiting for close lock at 1732383063648Running coprocessor pre-close hooks at 1732383063648Disabling compacts and flushes for region at 1732383063648Disabling writes for close at 1732383063648Writing region close event to WAL at 1732383063650 (+2 ms)Running coprocessor post-close hooks at 1732383063656 (+6 ms)Closed at 1732383063656 2024-11-23T17:31:03,659 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(157): Closed 28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:31:03,659 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=28c8e9ee69c0ee746fd3bc8995d8ac70, regionState=CLOSED 2024-11-23T17:31:03,660 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed 50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:31:03,660 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=50adbd0da757ccb80f4e38c5185fe307, regionState=CLOSED 2024-11-23T17:31:03,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:31:03,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=228, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure 50adbd0da757ccb80f4e38c5185fe307, server=215bb12e999f,45467,1732382620632 because future has completed 2024-11-23T17:31:03,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=226 2024-11-23T17:31:03,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-11-23T17:31:03,668 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=226, state=SUCCESS, hasLock=false; CloseRegionProcedure 28c8e9ee69c0ee746fd3bc8995d8ac70, server=215bb12e999f,44667,1732382620918 in 171 msec 2024-11-23T17:31:03,668 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=28c8e9ee69c0ee746fd3bc8995d8ac70, UNASSIGN in 177 msec 2024-11-23T17:31:03,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseRegionProcedure 50adbd0da757ccb80f4e38c5185fe307, server=215bb12e999f,45467,1732382620632 in 172 msec 2024-11-23T17:31:03,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=227, resume processing ppid=225 2024-11-23T17:31:03,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=50adbd0da757ccb80f4e38c5185fe307, UNASSIGN in 178 msec 2024-11-23T17:31:03,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-11-23T17:31:03,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 182 msec 2024-11-23T17:31:03,673 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732383063672"}]},"ts":"1732383063672"} 2024-11-23T17:31:03,674 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-23T17:31:03,674 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-23T17:31:03,676 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 193 msec 2024-11-23T17:31:03,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-11-23T17:31:03,807 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:31:03,809 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-23T17:31:03,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:31:03,813 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:31:03,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-23T17:31:03,815 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=230, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:31:03,818 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-23T17:31:03,819 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:31:03,819 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307 2024-11-23T17:31:03,820 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/recovered.edits] 2024-11-23T17:31:03,820 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/recovered.edits] 2024-11-23T17:31:03,842 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/cf/9ddace22423e4c5fb0f9929467a7a911 2024-11-23T17:31:03,842 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/cf/292980e15066421aac062dce81d24959 2024-11-23T17:31:03,846 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70/recovered.edits/9.seqid 2024-11-23T17:31:03,846 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307/recovered.edits/9.seqid 2024-11-23T17:31:03,847 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/28c8e9ee69c0ee746fd3bc8995d8ac70 2024-11-23T17:31:03,848 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportWithChecksum/50adbd0da757ccb80f4e38c5185fe307 
2024-11-23T17:31:03,848 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-23T17:31:03,850 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=230, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:31:03,853 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-23T17:31:03,855 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-23T17:31:03,856 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=230, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:31:03,856 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-23T17:31:03,856 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732383063856"}]},"ts":"9223372036854775807"} 2024-11-23T17:31:03,856 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732383063856"}]},"ts":"9223372036854775807"} 2024-11-23T17:31:03,859 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:31:03,859 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 28c8e9ee69c0ee746fd3bc8995d8ac70, NAME => 'testtb-testExportWithChecksum,,1732382993635.28c8e9ee69c0ee746fd3bc8995d8ac70.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 50adbd0da757ccb80f4e38c5185fe307, NAME => 'testtb-testExportWithChecksum,1,1732382993635.50adbd0da757ccb80f4e38c5185fe307.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:31:03,859 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
2024-11-23T17:31:03,859 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732383063859"}]},"ts":"9223372036854775807"} 2024-11-23T17:31:03,860 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-23T17:31:03,861 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=230, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-23T17:31:03,862 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 52 msec 2024-11-23T17:31:03,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:31:03,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:31:03,908 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:31:03,908 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:31:03,908 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T17:31:03,908 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T17:31:03,908 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T17:31:03,908 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-23T17:31:03,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:31:03,918 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:31:03,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:31:03,918 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-23T17:31:03,918 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:03,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:03,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:03,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:03,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-11-23T17:31:03,919 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:03,919 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:03,919 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:03,919 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:03,919 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-23T17:31:03,920 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-23T17:31:03,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-23T17:31:03,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-23T17:31:03,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-23T17:31:03,929 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-23T17:31:03,951 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=813 (was 819), OpenFileDescriptor=824 (was 820) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1120 (was 978) - SystemLoadAverage LEAK? -, ProcessCount=24 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=809 (was 1871) 2024-11-23T17:31:03,951 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-11-23T17:31:03,969 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=813, OpenFileDescriptor=824, MaxFileDescriptor=1048576, SystemLoadAverage=1120, ProcessCount=24, AvailableMemoryMB=808 2024-11-23T17:31:03,969 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-11-23T17:31:03,971 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-23T17:31:03,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:03,973 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-23T17:31:03,973 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:31:03,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 231 2024-11-23T17:31:03,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-11-23T17:31:03,975 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-23T17:31:03,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742363_1539 (size=418) 2024-11-23T17:31:03,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742363_1539 (size=418) 2024-11-23T17:31:03,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 
is added to blk_1073742363_1539 (size=418) 2024-11-23T17:31:03,982 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d202495da3d84797e04c2f9bfdced9fd, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:31:03,982 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 68752a74e870a82bdc08e1d67f58f487, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:31:03,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742364_1540 (size=79) 2024-11-23T17:31:03,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742364_1540 (size=79) 2024-11-23T17:31:03,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742365_1541 (size=79) 2024-11-23T17:31:03,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742364_1540 (size=79) 2024-11-23T17:31:03,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742365_1541 (size=79) 2024-11-23T17:31:03,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742365_1541 (size=79) 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; 
minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing d202495da3d84797e04c2f9bfdced9fd, disabling compactions & flushes 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 68752a74e870a82bdc08e1d67f58f487, disabling compactions & flushes 2024-11-23T17:31:03,990 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:03,990 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. after waiting 0 ms 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. after waiting 0 ms 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:03,990 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:03,990 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 
2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 68752a74e870a82bdc08e1d67f58f487: Waiting for close lock at 1732383063990Disabling compacts and flushes for region at 1732383063990Disabling writes for close at 1732383063990Writing region close event to WAL at 1732383063990Closed at 1732383063990 2024-11-23T17:31:03,990 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for d202495da3d84797e04c2f9bfdced9fd: Waiting for close lock at 1732383063990Disabling compacts and flushes for region at 1732383063990Disabling writes for close at 1732383063990Writing region close event to WAL at 1732383063990Closed at 1732383063990 2024-11-23T17:31:03,991 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-23T17:31:03,991 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732383063991"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732383063991"}]},"ts":"1732383063991"} 2024-11-23T17:31:03,991 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732383063991"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732383063991"}]},"ts":"1732383063991"} 2024-11-23T17:31:03,993 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
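(Illustrative aside, not part of the log: the two regions created above, ['', '1') and ['1', ''), are what a table pre-split at the single key '1' looks like. A minimal sketch of creating an equivalent table through the public HBase client API follows; the class name and connection setup are assumptions for the example, not the test's actual code.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePresplitTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          // One 'cf' family with VERSIONS => '1', matching the descriptor logged above.
          TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)
                  .build());
          // A single split key '1' yields the two regions seen in the log: ['', '1') and ['1', '').
          byte[][] splits = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(td.build(), splits);
        }
      }
    }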
2024-11-23T17:31:03,994 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-23T17:31:03,994 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732383063994"}]},"ts":"1732383063994"} 2024-11-23T17:31:03,995 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-23T17:31:03,995 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {215bb12e999f=0} racks are {/default-rack=0} 2024-11-23T17:31:03,996 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-23T17:31:03,996 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-23T17:31:03,996 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-23T17:31:03,996 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-23T17:31:03,996 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-23T17:31:03,996 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-23T17:31:03,996 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-23T17:31:03,996 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-23T17:31:03,996 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-23T17:31:03,996 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-23T17:31:03,996 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d202495da3d84797e04c2f9bfdced9fd, ASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=68752a74e870a82bdc08e1d67f58f487, ASSIGN}] 2024-11-23T17:31:03,997 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=68752a74e870a82bdc08e1d67f58f487, ASSIGN 2024-11-23T17:31:03,997 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d202495da3d84797e04c2f9bfdced9fd, ASSIGN 2024-11-23T17:31:03,998 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d202495da3d84797e04c2f9bfdced9fd, ASSIGN; state=OFFLINE, location=215bb12e999f,44667,1732382620918; forceNewPlan=false, retain=false 2024-11-23T17:31:03,998 INFO 
[PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=68752a74e870a82bdc08e1d67f58f487, ASSIGN; state=OFFLINE, location=215bb12e999f,41065,1732382621000; forceNewPlan=false, retain=false 2024-11-23T17:31:04,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-11-23T17:31:04,148 INFO [215bb12e999f:39621 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-23T17:31:04,149 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=d202495da3d84797e04c2f9bfdced9fd, regionState=OPENING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:31:04,149 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=68752a74e870a82bdc08e1d67f58f487, regionState=OPENING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:31:04,154 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=68752a74e870a82bdc08e1d67f58f487, ASSIGN because future has completed 2024-11-23T17:31:04,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68752a74e870a82bdc08e1d67f58f487, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:31:04,155 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d202495da3d84797e04c2f9bfdced9fd, ASSIGN because future has completed 2024-11-23T17:31:04,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure d202495da3d84797e04c2f9bfdced9fd, server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:31:04,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-11-23T17:31:04,314 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:04,315 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7752): Opening region: {ENCODED => d202495da3d84797e04c2f9bfdced9fd, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd.', STARTKEY => '', ENDKEY => '1'} 2024-11-23T17:31:04,316 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 
2024-11-23T17:31:04,316 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. service=AccessControlService 2024-11-23T17:31:04,316 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7752): Opening region: {ENCODED => 68752a74e870a82bdc08e1d67f58f487, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487.', STARTKEY => '1', ENDKEY => ''} 2024-11-23T17:31:04,316 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:31:04,317 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. service=AccessControlService 2024-11-23T17:31:04,317 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,317 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-23T17:31:04,317 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:31:04,317 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,318 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7794): checking encryption for d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,318 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-23T17:31:04,318 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7797): checking classloading for d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,318 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7794): checking encryption for 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,318 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7797): checking classloading for 
68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,320 INFO [StoreOpener-68752a74e870a82bdc08e1d67f58f487-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,320 INFO [StoreOpener-d202495da3d84797e04c2f9bfdced9fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,322 INFO [StoreOpener-68752a74e870a82bdc08e1d67f58f487-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 68752a74e870a82bdc08e1d67f58f487 columnFamilyName cf 2024-11-23T17:31:04,322 INFO [StoreOpener-d202495da3d84797e04c2f9bfdced9fd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d202495da3d84797e04c2f9bfdced9fd columnFamilyName cf 2024-11-23T17:31:04,322 DEBUG [StoreOpener-68752a74e870a82bdc08e1d67f58f487-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:31:04,322 DEBUG [StoreOpener-d202495da3d84797e04c2f9bfdced9fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-23T17:31:04,322 INFO [StoreOpener-68752a74e870a82bdc08e1d67f58f487-1 {}] regionserver.HStore(327): Store=68752a74e870a82bdc08e1d67f58f487/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:31:04,322 INFO [StoreOpener-d202495da3d84797e04c2f9bfdced9fd-1 {}] regionserver.HStore(327): Store=d202495da3d84797e04c2f9bfdced9fd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-23T17:31:04,322 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] 
regionserver.HRegion(1038): replaying wal for 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,322 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1038): replaying wal for d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,323 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,323 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,323 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,323 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,323 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1048): stopping wal replay for 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,323 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1048): stopping wal replay for d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,323 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1060): Cleaning up temporary data for 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,323 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1060): Cleaning up temporary data for d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,324 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1093): writing seq id for d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,324 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1093): writing seq id for 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,325 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:31:04,326 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1114): Opened d202495da3d84797e04c2f9bfdced9fd; next sequenceid=2; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65302775, jitterRate=-0.02691282331943512}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:31:04,326 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,326 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1006): Region open journal for d202495da3d84797e04c2f9bfdced9fd: Running coprocessor pre-open hook at 1732383064318Writing region info on filesystem at 1732383064318Initializing all the Stores at 1732383064320 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732383064320Cleaning up temporary data from old regions at 1732383064323 (+3 ms)Running coprocessor post-open hooks at 1732383064326 (+3 ms)Region opened successfully at 1732383064326 2024-11-23T17:31:04,327 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd., pid=235, masterSystemTime=1732383064308 2024-11-23T17:31:04,328 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:04,328 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-23T17:31:04,328 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 
2024-11-23T17:31:04,329 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1114): Opened 68752a74e870a82bdc08e1d67f58f487; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69278206, jitterRate=0.03232571482658386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-23T17:31:04,329 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,329 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1006): Region open journal for 68752a74e870a82bdc08e1d67f58f487: Running coprocessor pre-open hook at 1732383064318Writing region info on filesystem at 1732383064318Initializing all the Stores at 1732383064320 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732383064320Cleaning up temporary data from old regions at 1732383064323 (+3 ms)Running coprocessor post-open hooks at 1732383064329 (+6 ms)Region opened successfully at 1732383064329 2024-11-23T17:31:04,329 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=d202495da3d84797e04c2f9bfdced9fd, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:31:04,330 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487., pid=234, masterSystemTime=1732383064308 2024-11-23T17:31:04,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure d202495da3d84797e04c2f9bfdced9fd, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:31:04,331 DEBUG [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:04,331 INFO [RS_OPEN_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 
2024-11-23T17:31:04,332 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=68752a74e870a82bdc08e1d67f58f487, regionState=OPEN, openSeqNum=2, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:31:04,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=232 2024-11-23T17:31:04,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 68752a74e870a82bdc08e1d67f58f487, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:31:04,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=232, state=SUCCESS, hasLock=false; OpenRegionProcedure d202495da3d84797e04c2f9bfdced9fd, server=215bb12e999f,44667,1732382620918 in 175 msec 2024-11-23T17:31:04,334 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d202495da3d84797e04c2f9bfdced9fd, ASSIGN in 337 msec 2024-11-23T17:31:04,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=233 2024-11-23T17:31:04,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=233, state=SUCCESS, hasLock=false; OpenRegionProcedure 68752a74e870a82bdc08e1d67f58f487, server=215bb12e999f,41065,1732382621000 in 180 msec 2024-11-23T17:31:04,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=233, resume processing ppid=231 2024-11-23T17:31:04,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=68752a74e870a82bdc08e1d67f58f487, ASSIGN in 339 msec 2024-11-23T17:31:04,336 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-23T17:31:04,337 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732383064336"}]},"ts":"1732383064336"} 2024-11-23T17:31:04,338 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-23T17:31:04,338 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-23T17:31:04,338 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-23T17:31:04,341 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-23T17:31:04,392 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:04,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:04,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:04,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:04,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:04,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:04,405 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:04,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:04,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:04,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:04,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:04,406 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:04,407 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 432 msec 2024-11-23T17:31:04,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-11-23T17:31:04,606 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:31:04,607 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-11-23T17:31:04,607 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:31:04,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44667 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32844 bytes) of info 2024-11-23T17:31:04,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-11-23T17:31:04,611 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:31:04,611 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-11-23T17:31:04,611 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:31:04,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T17:31:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732383064614 (current time:1732383064614). 
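(Illustrative aside, not part of the log: the request above, { ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp ... type=FLUSH ttl=0 }, is what the master receives when a client asks for a FLUSH-type snapshot. A minimal sketch using the HBase Admin API, reusing the imports and connection setup from the earlier table-creation sketch plus org.apache.hadoop.hbase.client.SnapshotType, might look like the following.)

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot, mirroring the "{ ss=... type=FLUSH ttl=0 }" request logged above.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          SnapshotType.FLUSH);
    }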
2024-11-23T17:31:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:31:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-23T17:31:04,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:31:04,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f4ee7e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:31:04,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:31:04,616 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:31:04,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:31:04,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:31:04,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fea4587, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:31:04,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:31:04,617 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,618 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44058, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:31:04,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5141773b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:31:04,619 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:31:04,619 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:31:04,620 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39282, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:31:04,621 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 2024-11-23T17:31:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:31:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,621 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:31:04,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21ed5cb9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:31:04,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:31:04,623 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:31:04,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:31:04,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:31:04,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@598dc362, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:31:04,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:31:04,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,624 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44086, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:31:04,624 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4330fa08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:31:04,625 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:31:04,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:31:04,627 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39292, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-23T17:31:04,628 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:31:04,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:31:04,629 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45334, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:31:04,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 2024-11-23T17:31:04,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:31:04,631 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,631 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:31:04,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-23T17:31:04,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-23T17:31:04,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T17:31:04,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-11-23T17:31:04,634 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:31:04,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-11-23T17:31:04,635 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:31:04,637 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:31:04,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742366_1542 (size=203) 2024-11-23T17:31:04,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742366_1542 (size=203) 2024-11-23T17:31:04,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742366_1542 (size=203) 2024-11-23T17:31:04,647 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:31:04,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d202495da3d84797e04c2f9bfdced9fd}, {pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 68752a74e870a82bdc08e1d67f58f487}] 2024-11-23T17:31:04,647 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,647 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-11-23T17:31:04,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=237 2024-11-23T17:31:04,800 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=238 2024-11-23T17:31:04,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:04,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.HRegion(2603): Flush status journal for 68752a74e870a82bdc08e1d67f58f487: 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.HRegion(2603): Flush status journal for d202495da3d84797e04c2f9bfdced9fd: 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:31:04,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-23T17:31:04,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742367_1543 (size=82) 2024-11-23T17:31:04,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742367_1543 (size=82) 2024-11-23T17:31:04,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742367_1543 (size=82) 2024-11-23T17:31:04,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 
2024-11-23T17:31:04,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=238 2024-11-23T17:31:04,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=238 2024-11-23T17:31:04,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,812 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,813 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 68752a74e870a82bdc08e1d67f58f487 in 165 msec 2024-11-23T17:31:04,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742368_1544 (size=82) 2024-11-23T17:31:04,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742368_1544 (size=82) 2024-11-23T17:31:04,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742368_1544 (size=82) 2024-11-23T17:31:04,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 
2024-11-23T17:31:04,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=237 2024-11-23T17:31:04,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=237 2024-11-23T17:31:04,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,822 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:04,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=236 2024-11-23T17:31:04,825 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:31:04,825 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d202495da3d84797e04c2f9bfdced9fd in 176 msec 2024-11-23T17:31:04,825 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:31:04,826 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:31:04,826 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:04,826 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:04,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742369_1545 (size=585) 2024-11-23T17:31:04,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742369_1545 (size=585) 2024-11-23T17:31:04,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742369_1545 (size=585) 2024-11-23T17:31:04,834 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:31:04,838 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:31:04,839 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:04,840 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:31:04,840 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-11-23T17:31:04,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 208 msec 2024-11-23T17:31:04,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-11-23T17:31:04,946 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:31:04,949 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='04c5f1b8aed20b000b9949a8f4868228b', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd., hostname=215bb12e999f,44667,1732382620918, seqNum=2] 2024-11-23T17:31:04,950 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='14f107770204e010bdf8674438d08e066', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:31:04,951 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='2e1ac457e9c590e02cc871f5006e982f4', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 
2024-11-23T17:31:04,952 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='38d63fbe8f331822688020d2310481454', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:31:04,953 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='42a22b483183e0ab1d8c34bfd5e5f356e', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:31:04,953 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='5ba2138a8c530e642ad88b0ead3ad11f2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:31:04,954 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='654a4faa630e308a7f4cf2a0de6cacde3', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:31:04,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44667 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:31:04,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41065 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. with WAL disabled. Data may be lost in the event of a crash. 2024-11-23T17:31:04,958 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:31:04,961 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:04,961 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 
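The two "writing data to region ... with WAL disabled" warnings above are what HRegion emits when a client mutation carries Durability.SKIP_WAL. A minimal client-side sketch of such a write follows (illustration only, not part of the captured log); the table and column-family names come from the log, while the connection setup, row key and value are assumptions.

// Hypothetical client write that skips the WAL, matching the
// "writing data to region ... with WAL disabled" warnings above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))) {
      Put put = new Put(Bytes.toBytes("row-0"));                                      // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value")); // cf:q as seen in the log
      put.setDurability(Durability.SKIP_WAL);                                         // skips the WAL, hence the warning
      table.put(put);
    }
  }
}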
2024-11-23T17:31:04,961 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-23T17:31:04,962 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:31:04,967 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:31:04,971 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-23T17:31:04,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T17:31:04,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732383064973 (current time:1732383064973). 2024-11-23T17:31:04,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-23T17:31:04,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-23T17:31:04,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-23T17:31:04,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40e0619f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:31:04,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:31:04,975 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:31:04,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:31:04,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:31:04,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55026b2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:31:04,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:31:04,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,976 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44114, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:31:04,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ba5f0f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:31:04,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:31:04,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:31:04,978 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39296, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:31:04,979 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 
2024-11-23T17:31:04,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:31:04,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,979 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:31:04,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33c8fcff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ClusterIdFetcher(90): Going to request 215bb12e999f,39621,-1 for getting cluster id 2024-11-23T17:31:04,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-23T17:31:04,980 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d' 2024-11-23T17:31:04,980 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-23T17:31:04,980 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "abe3b5aa-b8c6-4cdb-bd02-1d9741b5316d" 2024-11-23T17:31:04,980 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59b774dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [215bb12e999f,39621,-1] 2024-11-23T17:31:04,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-23T17:31:04,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,981 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44124, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-23T17:31:04,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54a25f08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-23T17:31:04,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-23T17:31:04,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=215bb12e999f,44667,1732382620918, seqNum=-1] 2024-11-23T17:31:04,983 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:31:04,984 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39308, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:31:04,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., hostname=215bb12e999f,41065,1732382621000, seqNum=2] 2024-11-23T17:31:04,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-23T17:31:04,986 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45350, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-23T17:31:04,987 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621. 
2024-11-23T17:31:04,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor297.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-23T17:31:04,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:31:04,987 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-23T17:31:04,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-23T17:31:04,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
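At this point the master has accepted the snapshot request for snaptb0-testExportFileSystemStateWithSkipTmp (type=FLUSH) and is about to store SnapshotProcedure pid=239. A minimal sketch of the client-side call that issues such a request (illustration only, not part of the captured log); the snapshot and table names come from the log, and the three-argument SnapshotDescription constructor is assumed to be available in this client version.

// Hypothetical client-side request for a FLUSH-type snapshot, matching the
// "snapshot request for:{ ss=snaptb0-... type=FLUSH }" entry above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class FlushSnapshotExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the SnapshotProcedure reported in the log reaches SUCCESS.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          SnapshotType.FLUSH));
    }
  }
}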
2024-11-23T17:31:04,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-23T17:31:04,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-11-23T17:31:04,989 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-23T17:31:04,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-11-23T17:31:04,990 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-23T17:31:04,992 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-23T17:31:04,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742370_1546 (size=198) 2024-11-23T17:31:04,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742370_1546 (size=198) 2024-11-23T17:31:04,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742370_1546 (size=198) 2024-11-23T17:31:04,998 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-23T17:31:04,998 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d202495da3d84797e04c2f9bfdced9fd}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 68752a74e870a82bdc08e1d67f58f487}] 2024-11-23T17:31:04,998 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:04,998 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:05,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-11-23T17:31:05,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41065 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-11-23T17:31:05,151 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44667 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-11-23T17:31:05,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:05,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:05,152 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2902): Flushing d202495da3d84797e04c2f9bfdced9fd 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-23T17:31:05,152 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2902): Flushing 68752a74e870a82bdc08e1d67f58f487 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-23T17:31:05,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/.tmp/cf/55d2759df7fd4801804475275c0b9500 is 71, key is 1b528c7fd48bc33b5f31b697ecb311ae/cf:q/1732383064957/Put/seqid=0 2024-11-23T17:31:05,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/.tmp/cf/715f9ebd8b90430da37aa40ffc717038 is 71, key is 0325f5ec3410be32c5f9cde6b50a8322/cf:q/1732383064955/Put/seqid=0 2024-11-23T17:31:05,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742371_1547 (size=5422) 2024-11-23T17:31:05,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742371_1547 (size=5422) 2024-11-23T17:31:05,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742372_1548 (size=8190) 2024-11-23T17:31:05,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742372_1548 (size=8190) 2024-11-23T17:31:05,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742372_1548 (size=8190) 2024-11-23T17:31:05,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742371_1547 (size=5422) 2024-11-23T17:31:05,176 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/.tmp/cf/55d2759df7fd4801804475275c0b9500 2024-11-23T17:31:05,176 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/.tmp/cf/715f9ebd8b90430da37aa40ffc717038 2024-11-23T17:31:05,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/.tmp/cf/55d2759df7fd4801804475275c0b9500 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/cf/55d2759df7fd4801804475275c0b9500 2024-11-23T17:31:05,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/.tmp/cf/715f9ebd8b90430da37aa40ffc717038 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/cf/715f9ebd8b90430da37aa40ffc717038 2024-11-23T17:31:05,184 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/cf/55d2759df7fd4801804475275c0b9500, entries=45, sequenceid=6, filesize=8.0 K 2024-11-23T17:31:05,184 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/cf/715f9ebd8b90430da37aa40ffc717038, entries=5, sequenceid=6, filesize=5.3 K 2024-11-23T17:31:05,185 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for d202495da3d84797e04c2f9bfdced9fd in 33ms, sequenceid=6, compaction requested=false 
2024-11-23T17:31:05,185 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 68752a74e870a82bdc08e1d67f58f487 in 33ms, sequenceid=6, compaction requested=false 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for d202495da3d84797e04c2f9bfdced9fd: 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for 68752a74e870a82bdc08e1d67f58f487: 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/cf/715f9ebd8b90430da37aa40ffc717038] hfiles 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/cf/55d2759df7fd4801804475275c0b9500] hfiles 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/cf/715f9ebd8b90430da37aa40ffc717038 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:05,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/cf/55d2759df7fd4801804475275c0b9500 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742374_1550 (size=121) 2024-11-23T17:31:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742373_1549 (size=121) 2024-11-23T17:31:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742373_1549 (size=121) 2024-11-23T17:31:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742374_1550 (size=121) 2024-11-23T17:31:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742373_1549 (size=121) 2024-11-23T17:31:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742374_1550 (size=121) 2024-11-23T17:31:05,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 
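The "Finished flush" entries above show the FLUSH-type snapshot forcing each region's memstore out to an HFile (8.0 K and 5.3 K here) before SnapshotManifest records references to those files. Outside of a snapshot, the same flush can be requested for the whole table explicitly; a one-line sketch (illustration only), where admin is assumed to be an open org.apache.hadoop.hbase.client.Admin:

// Hypothetical explicit flush of the table seen in the log.
admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));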
2024-11-23T17:31:05,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:05,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-11-23T17:31:05,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/215bb12e999f:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-11-23T17:31:05,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-11-23T17:31:05,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-11-23T17:31:05,191 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:05,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:05,191 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:05,191 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:05,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d202495da3d84797e04c2f9bfdced9fd in 194 msec 2024-11-23T17:31:05,194 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=239 2024-11-23T17:31:05,194 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 68752a74e870a82bdc08e1d67f58f487 in 194 msec 2024-11-23T17:31:05,194 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-23T17:31:05,194 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-23T17:31:05,195 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-23T17:31:05,195 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:05,195 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:05,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742375_1551 (size=663) 2024-11-23T17:31:05,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742375_1551 (size=663) 2024-11-23T17:31:05,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742375_1551 (size=663) 2024-11-23T17:31:05,206 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-23T17:31:05,210 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-23T17:31:05,210 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:05,211 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-23T17:31:05,211 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-11-23T17:31:05,212 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 223 msec 2024-11-23T17:31:05,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-11-23T17:31:05,306 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: 
default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:31:05,306 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306 2024-11-23T17:31:05,306 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:37605, tgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306, rawTgtDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306, srcFsUri=hdfs://localhost:37605, srcDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:31:05,335 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:37605, inputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf 2024-11-23T17:31:05,335 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:05,336 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-23T17:31:05,339 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:05,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742376_1552 (size=198) 2024-11-23T17:31:05,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742376_1552 (size=198) 2024-11-23T17:31:05,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742376_1552 (size=198) 2024-11-23T17:31:05,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742377_1553 (size=663) 2024-11-23T17:31:05,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742377_1553 (size=663) 2024-11-23T17:31:05,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742377_1553 (size=663) 2024-11-23T17:31:05,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:05,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:05,353 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:06,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-7783536665864944881.jar 2024-11-23T17:31:06,334 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:06,335 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:06,397 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop-16648631220561008338.jar 2024-11-23T17:31:06,397 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:06,398 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:06,398 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:06,398 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:06,398 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:06,398 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-23T17:31:06,398 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-23T17:31:06,399 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-23T17:31:06,399 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-23T17:31:06,399 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-23T17:31:06,399 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-23T17:31:06,399 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-23T17:31:06,399 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-23T17:31:06,400 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-23T17:31:06,400 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-23T17:31:06,400 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-23T17:31:06,400 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-23T17:31:06,400 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
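The entries from 17:31:05,306 onward prepare an ExportSnapshot run with skipTmp=true, and the long run of "For class ..., using jar ..." lines is TableMapReduceUtil resolving the jars to ship with the MapReduce job. A minimal sketch of driving the same tool programmatically (illustration only, not part of the captured log); the -snapshot and -copy-to values come from the log, while the "snapshot.export.skiptmp" property name is an assumption to be checked against the ExportSnapshot version in use.

// Hypothetical programmatic invocation of ExportSnapshot matching the
// skipTmp=true export logged above; the skip-tmp property key is assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("snapshot.export.skiptmp", true);  // assumed key for the skipTmp behaviour
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306"
    });
    System.exit(rc);
  }
}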
2024-11-23T17:31:06,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:31:06,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:31:06,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:31:06,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-23T17:31:06,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:31:06,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-23T17:31:06,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742378_1554 (size=131440) 2024-11-23T17:31:06,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742378_1554 (size=131440) 2024-11-23T17:31:06,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742378_1554 (size=131440) 2024-11-23T17:31:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742379_1555 (size=4188619) 2024-11-23T17:31:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742379_1555 (size=4188619) 2024-11-23T17:31:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742379_1555 (size=4188619) 2024-11-23T17:31:06,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742380_1556 (size=440957) 2024-11-23T17:31:06,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742380_1556 (size=440957) 2024-11-23T17:31:06,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742380_1556 (size=440957) 2024-11-23T17:31:06,840 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete 
returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0010/container_1732382632246_0010_01_000003/launch_container.sh] 2024-11-23T17:31:06,841 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0010/container_1732382632246_0010_01_000003/container_tokens] 2024-11-23T17:31:06,841 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0010/container_1732382632246_0010_01_000003/sysfs] 2024-11-23T17:31:06,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742381_1557 (size=1323991) 2024-11-23T17:31:06,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742381_1557 (size=1323991) 2024-11-23T17:31:06,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742381_1557 (size=1323991) 2024-11-23T17:31:06,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742382_1558 (size=903733) 2024-11-23T17:31:06,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742382_1558 (size=903733) 2024-11-23T17:31:06,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742382_1558 (size=903733) 2024-11-23T17:31:06,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742383_1559 (size=8360083) 2024-11-23T17:31:06,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742383_1559 (size=8360083) 2024-11-23T17:31:06,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742383_1559 (size=8360083) 2024-11-23T17:31:06,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742384_1560 (size=1877034) 2024-11-23T17:31:06,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742384_1560 (size=1877034) 2024-11-23T17:31:06,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742384_1560 (size=1877034) 2024-11-23T17:31:06,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742385_1561 (size=77835) 2024-11-23T17:31:06,972 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742385_1561 (size=77835) 2024-11-23T17:31:06,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742385_1561 (size=77835) 2024-11-23T17:31:06,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742386_1562 (size=30949) 2024-11-23T17:31:06,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742386_1562 (size=30949) 2024-11-23T17:31:06,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742386_1562 (size=30949) 2024-11-23T17:31:06,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742387_1563 (size=1597347) 2024-11-23T17:31:06,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742387_1563 (size=1597347) 2024-11-23T17:31:06,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742387_1563 (size=1597347) 2024-11-23T17:31:07,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742388_1564 (size=4695811) 2024-11-23T17:31:07,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742388_1564 (size=4695811) 2024-11-23T17:31:07,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742388_1564 (size=4695811) 2024-11-23T17:31:07,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742389_1565 (size=232957) 2024-11-23T17:31:07,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742389_1565 (size=232957) 2024-11-23T17:31:07,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742389_1565 (size=232957) 2024-11-23T17:31:07,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742390_1566 (size=127628) 2024-11-23T17:31:07,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742390_1566 (size=127628) 2024-11-23T17:31:07,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742390_1566 (size=127628) 2024-11-23T17:31:07,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742391_1567 (size=20406) 2024-11-23T17:31:07,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742391_1567 (size=20406) 2024-11-23T17:31:07,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742391_1567 (size=20406) 2024-11-23T17:31:07,490 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742392_1568 (size=5175431) 2024-11-23T17:31:07,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742392_1568 (size=5175431) 2024-11-23T17:31:07,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742392_1568 (size=5175431) 2024-11-23T17:31:07,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742393_1569 (size=217634) 2024-11-23T17:31:07,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742393_1569 (size=217634) 2024-11-23T17:31:07,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742393_1569 (size=217634) 2024-11-23T17:31:07,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742394_1570 (size=1832290) 2024-11-23T17:31:07,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742394_1570 (size=1832290) 2024-11-23T17:31:07,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742394_1570 (size=1832290) 2024-11-23T17:31:07,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742395_1571 (size=322274) 2024-11-23T17:31:07,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742395_1571 (size=322274) 2024-11-23T17:31:07,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742395_1571 (size=322274) 2024-11-23T17:31:07,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742396_1572 (size=503880) 2024-11-23T17:31:07,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742396_1572 (size=503880) 2024-11-23T17:31:07,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742396_1572 (size=503880) 2024-11-23T17:31:07,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742397_1573 (size=29229) 2024-11-23T17:31:07,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742397_1573 (size=29229) 2024-11-23T17:31:07,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742397_1573 (size=29229) 2024-11-23T17:31:07,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742398_1574 (size=24096) 2024-11-23T17:31:07,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742398_1574 (size=24096) 2024-11-23T17:31:07,546 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742398_1574 (size=24096) 2024-11-23T17:31:07,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742399_1575 (size=111872) 2024-11-23T17:31:07,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742399_1575 (size=111872) 2024-11-23T17:31:07,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742399_1575 (size=111872) 2024-11-23T17:31:07,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742400_1576 (size=6424742) 2024-11-23T17:31:07,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742400_1576 (size=6424742) 2024-11-23T17:31:07,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742400_1576 (size=6424742) 2024-11-23T17:31:07,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742401_1577 (size=45609) 2024-11-23T17:31:07,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742401_1577 (size=45609) 2024-11-23T17:31:07,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742401_1577 (size=45609) 2024-11-23T17:31:07,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742402_1578 (size=136454) 2024-11-23T17:31:07,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742402_1578 (size=136454) 2024-11-23T17:31:07,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742402_1578 (size=136454) 2024-11-23T17:31:07,598 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
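The JobResourceUploader(481) warning at the end of the previous entry is emitted when a job is submitted without a job jar set. A minimal sketch, assuming a stock Hadoop MapReduce client, of the two usual ways to set it (the jar path is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarExample {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "set-job-jar-demo");

    // Either derive the job jar from a class packaged inside it...
    job.setJarByClass(JobJarExample.class);

    // ...or point at a jar path explicitly (hypothetical path).
    // job.setJar("/path/to/my-job.jar");

    // Without one of these, JobResourceUploader logs:
    // "No job jar file set. User classes may not be found."
  }
}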
2024-11-23T17:31:07,600 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-23T17:31:07,602 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.0 K 2024-11-23T17:31:07,602 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.3 K 2024-11-23T17:31:07,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742403_1579 (size=469) 2024-11-23T17:31:07,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742403_1579 (size=469) 2024-11-23T17:31:07,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742403_1579 (size=469) 2024-11-23T17:31:07,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742404_1580 (size=21) 2024-11-23T17:31:07,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742404_1580 (size=21) 2024-11-23T17:31:07,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742404_1580 (size=21) 2024-11-23T17:31:07,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742405_1581 (size=304172) 2024-11-23T17:31:07,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742405_1581 (size=304172) 2024-11-23T17:31:07,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742405_1581 (size=304172) 2024-11-23T17:31:08,092 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:31:08,277 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-23T17:31:08,277 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-23T17:31:08,283 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0010_000001 (auth:SIMPLE) from 127.0.0.1:32848 2024-11-23T17:31:08,291 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0010/container_1732382632246_0010_01_000001/launch_container.sh] 2024-11-23T17:31:08,291 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0010/container_1732382632246_0010_01_000001/container_tokens] 2024-11-23T17:31:08,292 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0010/container_1732382632246_0010_01_000001/sysfs] 2024-11-23T17:31:09,194 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:31:09,215 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0011_000001 (auth:SIMPLE) from 127.0.0.1:46786 2024-11-23T17:31:09,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:09,806 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-23T17:31:09,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-23T17:31:14,928 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0011_000001 (auth:SIMPLE) from 127.0.0.1:58644 2024-11-23T17:31:15,309 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:31:15,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742406_1582 (size=349894) 2024-11-23T17:31:15,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742406_1582 (size=349894) 2024-11-23T17:31:15,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742406_1582 (size=349894) 2024-11-23T17:31:17,293 DEBUG [HBase-Metrics2-1 {}] 
regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 41264850465b123187f03a78b2521b39, had cached 0 bytes from a total of 5595 2024-11-23T17:31:17,658 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0011_000001 (auth:SIMPLE) from 127.0.0.1:35834 2024-11-23T17:31:17,663 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0011_000001 (auth:SIMPLE) from 127.0.0.1:38006 2024-11-23T17:31:23,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742407_1583 (size=8190) 2024-11-23T17:31:23,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742407_1583 (size=8190) 2024-11-23T17:31:23,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742407_1583 (size=8190) 2024-11-23T17:31:24,593 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0011/container_1732382632246_0011_01_000002/launch_container.sh] 2024-11-23T17:31:24,593 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0011/container_1732382632246_0011_01_000002/container_tokens] 2024-11-23T17:31:24,593 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_2/usercache/jenkins/appcache/application_1732382632246_0011/container_1732382632246_0011_01_000002/sysfs] 2024-11-23T17:31:25,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742409_1585 (size=5422) 2024-11-23T17:31:25,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742409_1585 (size=5422) 2024-11-23T17:31:25,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742409_1585 (size=5422) 2024-11-23T17:31:25,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742408_1584 (size=22217) 2024-11-23T17:31:25,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742408_1584 (size=22217) 2024-11-23T17:31:25,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742408_1584 (size=22217) 2024-11-23T17:31:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742410_1586 (size=477) 
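The earlier "Loading Snapshot ... hfile list" and "export split=N size=..." entries, and the MiniMRCluster containers launched around here, belong to the snapshot export MapReduce job. As a rough sketch only, assuming ExportSnapshot can be driven as a standard Hadoop Tool and with a purely illustrative destination URI and mapper count, the export is typically launched like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();

    // Mirrors the command-line form:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot <name> -copy-to <dest> -mappers <n>
    // The snapshot name matches the log; the destination is illustrative.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://localhost:37605/user/jenkins/export-test/export-dest",
        "-mappers", "2"
    });
    System.exit(rc);
  }
}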
2024-11-23T17:31:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742410_1586 (size=477) 2024-11-23T17:31:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742410_1586 (size=477) 2024-11-23T17:31:25,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742411_1587 (size=22217) 2024-11-23T17:31:25,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742411_1587 (size=22217) 2024-11-23T17:31:25,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742411_1587 (size=22217) 2024-11-23T17:31:25,330 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0011/container_1732382632246_0011_01_000003/launch_container.sh] 2024-11-23T17:31:25,330 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0011/container_1732382632246_0011_01_000003/container_tokens] 2024-11-23T17:31:25,330 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-0_1/usercache/jenkins/appcache/application_1732382632246_0011/container_1732382632246_0011_01_000003/sysfs] 2024-11-23T17:31:25,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742412_1588 (size=349894) 2024-11-23T17:31:25,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742412_1588 (size=349894) 2024-11-23T17:31:25,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742412_1588 (size=349894) 2024-11-23T17:31:25,700 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732382632246_0011_000001 (auth:SIMPLE) from 127.0.0.1:56994 2024-11-23T17:31:27,589 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-23T17:31:27,589 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
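"Finalize the Snapshot Export" and the verification step are followed below by TestExportSnapshot listing .snapshotinfo and data.manifest at both the source and destination. A small sketch of an equivalent check, assuming plain HDFS FileSystem access and reusing the NameNode URI and export path seen in the log:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListExportedSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI and export path are taken from the log; adjust for a real cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37605"), conf);
    Path exported = new Path(
        "/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf"
        + "/export-test/export-1732383065306/.hbase-snapshot"
        + "/snaptb0-testExportFileSystemStateWithSkipTmp");

    // Recursively list every file under the exported snapshot directory;
    // this should surface .snapshotinfo and data.manifest as in the log.
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(exported, true);
    while (it.hasNext()) {
      LocatedFileStatus f = it.next();
      System.out.println(f.getPath() + " (" + f.getLen() + " bytes)");
    }
  }
}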
2024-11-23T17:31:27,594 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,594 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-23T17:31:27,594 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-23T17:31:27,594 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,595 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-23T17:31:27,595 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-23T17:31:27,595 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-1325804072_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,595 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-23T17:31:27,595 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/export-test/export-1732383065306/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-23T17:31:27,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T17:31:27,604 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732383087604"}]},"ts":"1732383087604"} 2024-11-23T17:31:27,606 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-23T17:31:27,606 INFO 
[PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-23T17:31:27,607 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-23T17:31:27,608 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d202495da3d84797e04c2f9bfdced9fd, UNASSIGN}, {pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=68752a74e870a82bdc08e1d67f58f487, UNASSIGN}] 2024-11-23T17:31:27,608 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=68752a74e870a82bdc08e1d67f58f487, UNASSIGN 2024-11-23T17:31:27,608 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d202495da3d84797e04c2f9bfdced9fd, UNASSIGN 2024-11-23T17:31:27,609 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta row=d202495da3d84797e04c2f9bfdced9fd, regionState=CLOSING, regionLocation=215bb12e999f,44667,1732382620918 2024-11-23T17:31:27,609 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=68752a74e870a82bdc08e1d67f58f487, regionState=CLOSING, regionLocation=215bb12e999f,41065,1732382621000 2024-11-23T17:31:27,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=68752a74e870a82bdc08e1d67f58f487, UNASSIGN because future has completed 2024-11-23T17:31:27,610 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:31:27,610 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 68752a74e870a82bdc08e1d67f58f487, server=215bb12e999f,41065,1732382621000}] 2024-11-23T17:31:27,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d202495da3d84797e04c2f9bfdced9fd, UNASSIGN because future has completed 2024-11-23T17:31:27,611 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-23T17:31:27,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure d202495da3d84797e04c2f9bfdced9fd, 
server=215bb12e999f,44667,1732382620918}] 2024-11-23T17:31:27,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T17:31:27,763 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(122): Close 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:27,763 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:31:27,763 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1722): Closing 68752a74e870a82bdc08e1d67f58f487, disabling compactions & flushes 2024-11-23T17:31:27,763 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:27,763 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:27,763 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. after waiting 0 ms 2024-11-23T17:31:27,763 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:27,763 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(122): Close d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:27,763 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-23T17:31:27,763 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1722): Closing d202495da3d84797e04c2f9bfdced9fd, disabling compactions & flushes 2024-11-23T17:31:27,763 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:27,764 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:27,764 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 
after waiting 0 ms 2024-11-23T17:31:27,764 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 2024-11-23T17:31:27,767 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:31:27,767 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-23T17:31:27,767 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:31:27,767 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487. 2024-11-23T17:31:27,767 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1676): Region close journal for 68752a74e870a82bdc08e1d67f58f487: Waiting for close lock at 1732383087763Running coprocessor pre-close hooks at 1732383087763Disabling compacts and flushes for region at 1732383087763Disabling writes for close at 1732383087763Writing region close event to WAL at 1732383087763Running coprocessor post-close hooks at 1732383087767 (+4 ms)Closed at 1732383087767 2024-11-23T17:31:27,768 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:31:27,768 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd. 
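The DisableTableProcedure, TransitRegionStateProcedure, and region-close entries above are the master- and regionserver-side execution of a single client request. A minimal sketch of that client call, using the standard Admin API (cluster connection details omitted; the table name is the one from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");

      // The synchronous call blocks until the DisableTableProcedure
      // (and its region-close subprocedures) completes on the master.
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn);
      }
      System.out.println("disabled: " + admin.isTableDisabled(tn));
    }
  }
}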
2024-11-23T17:31:27,768 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1676): Region close journal for d202495da3d84797e04c2f9bfdced9fd: Waiting for close lock at 1732383087763Running coprocessor pre-close hooks at 1732383087763Disabling compacts and flushes for region at 1732383087763Disabling writes for close at 1732383087764 (+1 ms)Writing region close event to WAL at 1732383087764Running coprocessor post-close hooks at 1732383087768 (+4 ms)Closed at 1732383087768 2024-11-23T17:31:27,773 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(157): Closed 68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:27,773 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=68752a74e870a82bdc08e1d67f58f487, regionState=CLOSED 2024-11-23T17:31:27,773 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(157): Closed d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:27,774 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta row=d202495da3d84797e04c2f9bfdced9fd, regionState=CLOSED 2024-11-23T17:31:27,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 68752a74e870a82bdc08e1d67f58f487, server=215bb12e999f,41065,1732382621000 because future has completed 2024-11-23T17:31:27,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure d202495da3d84797e04c2f9bfdced9fd, server=215bb12e999f,44667,1732382620918 because future has completed 2024-11-23T17:31:27,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-11-23T17:31:27,777 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseRegionProcedure 68752a74e870a82bdc08e1d67f58f487, server=215bb12e999f,41065,1732382621000 in 165 msec 2024-11-23T17:31:27,777 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=244 2024-11-23T17:31:27,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=68752a74e870a82bdc08e1d67f58f487, UNASSIGN in 168 msec 2024-11-23T17:31:27,777 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=244, state=SUCCESS, hasLock=false; CloseRegionProcedure d202495da3d84797e04c2f9bfdced9fd, server=215bb12e999f,44667,1732382620918 in 165 msec 2024-11-23T17:31:27,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=244, resume processing ppid=243 2024-11-23T17:31:27,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d202495da3d84797e04c2f9bfdced9fd, UNASSIGN in 169 msec 2024-11-23T17:31:27,781 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-11-23T17:31:27,781 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 173 msec 2024-11-23T17:31:27,782 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732383087781"}]},"ts":"1732383087781"} 2024-11-23T17:31:27,783 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-23T17:31:27,783 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-23T17:31:27,784 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 182 msec 2024-11-23T17:31:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-23T17:31:27,916 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:31:27,916 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,918 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,919 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=248, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,921 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41065 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,922 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:27,922 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:27,923 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/recovered.edits] 2024-11-23T17:31:27,923 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/cf, FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/recovered.edits] 2024-11-23T17:31:27,926 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/cf/715f9ebd8b90430da37aa40ffc717038 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/cf/715f9ebd8b90430da37aa40ffc717038 2024-11-23T17:31:27,926 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/cf/55d2759df7fd4801804475275c0b9500 to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/cf/55d2759df7fd4801804475275c0b9500 2024-11-23T17:31:27,946 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd/recovered.edits/9.seqid 2024-11-23T17:31:27,946 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/recovered.edits/9.seqid to hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487/recovered.edits/9.seqid 2024-11-23T17:31:27,947 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/d202495da3d84797e04c2f9bfdced9fd 2024-11-23T17:31:27,947 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testtb-testExportFileSystemStateWithSkipTmp/68752a74e870a82bdc08e1d67f58f487 2024-11-23T17:31:27,947 DEBUG [PEWorker-4 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-23T17:31:27,949 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=248, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,951 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-23T17:31:27,952 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-23T17:31:27,953 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=248, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,953 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-11-23T17:31:27,954 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732383087953"}]},"ts":"9223372036854775807"} 2024-11-23T17:31:27,954 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732383087953"}]},"ts":"9223372036854775807"} 2024-11-23T17:31:27,955 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-23T17:31:27,955 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d202495da3d84797e04c2f9bfdced9fd, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732383063971.d202495da3d84797e04c2f9bfdced9fd.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 68752a74e870a82bdc08e1d67f58f487, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732383063971.68752a74e870a82bdc08e1d67f58f487.', STARTKEY => '1', ENDKEY => ''}] 2024-11-23T17:31:27,956 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
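The DeleteTableProcedure entries above (HFileArchiver moving the region directories into the archive, then the rows being removed from hbase:meta) are driven by a client deleteTable call, and the "delete name: ..." snapshot requests further below by deleteSnapshot. A hedged sketch of both, using the standard Admin API with the table and snapshot names from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshotsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");

      // deleteTable requires the table to be disabled first; it triggers the
      // server-side DeleteTableProcedure that archives region data and cleans
      // up hbase:meta, as seen in the log above.
      admin.deleteTable(tn);

      // Snapshot cleanup, matching the delete requests later in the log.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}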
2024-11-23T17:31:27,956 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732383087956"}]},"ts":"9223372036854775807"} 2024-11-23T17:31:27,957 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-23T17:31:27,958 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=248, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,959 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 42 msec 2024-11-23T17:31:27,998 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:27,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T17:31:27,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T17:31:27,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T17:31:28,000 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-23T17:31:28,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:28,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:28,011 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:28,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:28,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:28,011 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:28,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:28,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-23T17:31:28,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-11-23T17:31:28,012 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:28,012 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:28,012 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:28,012 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-23T17:31:28,012 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:28,012 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-23T17:31:28,017 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-23T17:31:28,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: 
emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:28,020 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-23T17:31:28,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:28,040 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=822 (was 813) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 135436) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1549690208) connection to localhost/127.0.0.1:39467 
from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39467 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:51134 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:51172 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-8799 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1325804072_22 at /127.0.0.1:42868 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-194019410_1 at /127.0.0.1:42842 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-194019410_1 at /127.0.0.1:51110 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=811 (was 824), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1018 (was 1120), ProcessCount=21 (was 24), AvailableMemoryMB=986 (was 808) - AvailableMemoryMB LEAK? - 2024-11-23T17:31:28,040 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=822 is superior to 500 2024-11-23T17:31:28,040 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
2024-11-23T17:31:28,047 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bc03170{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T17:31:28,049 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33ae5ff8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T17:31:28,049 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T17:31:28,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bd14f10{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T17:31:28,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21927721{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,STOPPED} 2024-11-23T17:31:28,064 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732382632246_0011_01_000001 is : 143 2024-11-23T17:31:28,079 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0011/container_1732382632246_0011_01_000001/launch_container.sh] 2024-11-23T17:31:28,079 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0011/container_1732382632246_0011_01_000001/container_tokens] 2024-11-23T17:31:28,079 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1323292101/yarn-6051925290/MiniMRCluster_1323292101-localDir-nm-1_0/usercache/jenkins/appcache/application_1732382632246_0011/container_1732382632246_0011_01_000001/sysfs] 2024-11-23T17:31:29,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-23T17:31:33,303 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:31:38,092 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:31:43,909 WARN [regionserver/215bb12e999f:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 2, running: 1 2024-11-23T17:31:45,062 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@129022cc{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-23T17:31:45,063 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@35820016{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T17:31:45,063 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T17:31:45,063 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@274b914{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T17:31:45,063 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ada4a0a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,STOPPED} 2024-11-23T17:31:45,877 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5e511dfc454574bea858d074620b30f9, had cached 0 bytes from a total of 8190 2024-11-23T17:31:45,877 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6f88863827b64acc564823a1044a6b2b, had cached 0 bytes from a total of 5422 2024-11-23T17:32:02,073 ERROR [Thread[Thread-403,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-23T17:32:02,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@68bb6bdc{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-23T17:32:02,074 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@388f6f6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T17:32:02,074 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T17:32:02,075 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2bdadcc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T17:32:02,075 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@305c2704{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,STOPPED} 2024-11-23T17:32:02,077 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-11-23T17:32:02,083 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-23T17:32:02,083 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-23T17:32:02,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741830_1006 (size=1152918) 2024-11-23T17:32:02,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741830_1006 (size=1152918) 2024-11-23T17:32:02,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741830_1006 (size=1152918) 2024-11-23T17:32:02,087 ERROR [Thread[Thread-425,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-23T17:32:02,091 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c56c60{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-23T17:32:02,092 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@118b729d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T17:32:02,092 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-23T17:32:02,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@294ca8f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-23T17:32:02,093 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@482db9e0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,STOPPED} 2024-11-23T17:32:02,094 ERROR [Thread[Thread-385,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-23T17:32:02,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-23T17:32:02,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-23T17:32:02,094 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T17:32:02,094 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:32:02,094 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:32:02,094 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:32:02,094 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-23T17:32:02,095 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-23T17:32:02,095 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1359468130, stopped=false 2024-11-23T17:32:02,095 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:02,095 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-23T17:32:02,095 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=215bb12e999f,39621,1732382619205 2024-11-23T17:32:02,154 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T17:32:02,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T17:32:02,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T17:32:02,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-23T17:32:02,154 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T17:32:02,154 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:32:02,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:32:02,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:32:02,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:32:02,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:32:02,155 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-23T17:32:02,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:32:02,155 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:32:02,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:32:02,155 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:32:02,156 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '215bb12e999f,45467,1732382620632' ***** 2024-11-23T17:32:02,156 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-23T17:32:02,157 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:02,157 INFO [Time-limited 
test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T17:32:02,158 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '215bb12e999f,44667,1732382620918' ***** 2024-11-23T17:32:02,158 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:02,158 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T17:32:02,158 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '215bb12e999f,41065,1732382621000' ***** 2024-11-23T17:32:02,158 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:02,158 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-23T17:32:02,158 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T17:32:02,158 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T17:32:02,158 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-23T17:32:02,158 INFO [RS:2;215bb12e999f:41065 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T17:32:02,158 INFO [RS:0;215bb12e999f:45467 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T17:32:02,158 INFO [RS:1;215bb12e999f:44667 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-23T17:32:02,158 INFO [RS:1;215bb12e999f:44667 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T17:32:02,158 INFO [RS:0;215bb12e999f:45467 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-23T17:32:02,158 INFO [RS:2;215bb12e999f:41065 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-23T17:32:02,158 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(3091): Received CLOSE for 5e511dfc454574bea858d074620b30f9 2024-11-23T17:32:02,158 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T17:32:02,158 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(3091): Received CLOSE for 41264850465b123187f03a78b2521b39 2024-11-23T17:32:02,158 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T17:32:02,158 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(3091): Received CLOSE for 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:32:02,159 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-23T17:32:02,159 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(959): stopping server 215bb12e999f,41065,1732382621000 2024-11-23T17:32:02,159 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(959): stopping server 215bb12e999f,45467,1732382620632 2024-11-23T17:32:02,159 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(959): stopping server 215bb12e999f,44667,1732382620918 2024-11-23T17:32:02,159 INFO [RS:2;215bb12e999f:41065 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T17:32:02,159 INFO [RS:0;215bb12e999f:45467 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T17:32:02,159 INFO [RS:1;215bb12e999f:44667 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T17:32:02,159 INFO [RS:2;215bb12e999f:41065 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;215bb12e999f:41065. 2024-11-23T17:32:02,159 INFO [RS:0;215bb12e999f:45467 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;215bb12e999f:45467. 2024-11-23T17:32:02,159 INFO [RS:1;215bb12e999f:44667 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;215bb12e999f:44667. 
2024-11-23T17:32:02,159 DEBUG [RS:2;215bb12e999f:41065 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:32:02,159 DEBUG [RS:0;215bb12e999f:45467 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:32:02,159 DEBUG [RS:1;215bb12e999f:44667 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:32:02,159 DEBUG [RS:2;215bb12e999f:41065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:32:02,159 DEBUG [RS:0;215bb12e999f:45467 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:32:02,159 DEBUG [RS:1;215bb12e999f:44667 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:32:02,159 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T17:32:02,159 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-23T17:32:02,159 INFO [RS:1;215bb12e999f:44667 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T17:32:02,159 INFO [RS:1;215bb12e999f:44667 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T17:32:02,159 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(1325): Online Regions={41264850465b123187f03a78b2521b39=hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39.} 2024-11-23T17:32:02,159 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(1325): Online Regions={5e511dfc454574bea858d074620b30f9=testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9.} 2024-11-23T17:32:02,159 INFO [RS:1;215bb12e999f:44667 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T17:32:02,159 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 6f88863827b64acc564823a1044a6b2b, disabling compactions & flushes 2024-11-23T17:32:02,159 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-23T17:32:02,159 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:32:02,159 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:32:02,159 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. after waiting 0 ms 2024-11-23T17:32:02,159 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 
2024-11-23T17:32:02,159 DEBUG [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(1351): Waiting on 5e511dfc454574bea858d074620b30f9 2024-11-23T17:32:02,159 DEBUG [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(1351): Waiting on 41264850465b123187f03a78b2521b39 2024-11-23T17:32:02,159 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5e511dfc454574bea858d074620b30f9, disabling compactions & flushes 2024-11-23T17:32:02,159 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:32:02,160 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:32:02,160 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. after waiting 0 ms 2024-11-23T17:32:02,160 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 41264850465b123187f03a78b2521b39, disabling compactions & flushes 2024-11-23T17:32:02,160 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:32:02,160 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 2024-11-23T17:32:02,160 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 2024-11-23T17:32:02,160 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. after waiting 0 ms 2024-11-23T17:32:02,160 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 
2024-11-23T17:32:02,160 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 41264850465b123187f03a78b2521b39 1/1 column families, dataSize=694 B heapSize=1.74 KB 2024-11-23T17:32:02,161 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-23T17:32:02,161 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(1325): Online Regions={6f88863827b64acc564823a1044a6b2b=testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b., 1588230740=hbase:meta,,1.1588230740} 2024-11-23T17:32:02,161 DEBUG [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 6f88863827b64acc564823a1044a6b2b 2024-11-23T17:32:02,161 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-23T17:32:02,161 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-23T17:32:02,161 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-23T17:32:02,161 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-23T17:32:02,161 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-23T17:32:02,161 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=85.70 KB heapSize=135.56 KB 2024-11-23T17:32:02,165 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/5e511dfc454574bea858d074620b30f9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:32:02,165 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/default/testExportExpiredSnapshot/6f88863827b64acc564823a1044a6b2b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-23T17:32:02,165 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:02,165 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 
2024-11-23T17:32:02,165 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5e511dfc454574bea858d074620b30f9: Waiting for close lock at 1732383122159Running coprocessor pre-close hooks at 1732383122159Disabling compacts and flushes for region at 1732383122159Disabling writes for close at 1732383122160 (+1 ms)Writing region close event to WAL at 1732383122161 (+1 ms)Running coprocessor post-close hooks at 1732383122165 (+4 ms)Closed at 1732383122165 2024-11-23T17:32:02,165 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9. 2024-11-23T17:32:02,166 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/.tmp/l/699119f893d943c4a07c3fd7e3aca7d2 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1732382968039/DeleteFamily/seqid=0 2024-11-23T17:32:02,179 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:02,179 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 2024-11-23T17:32:02,180 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 6f88863827b64acc564823a1044a6b2b: Waiting for close lock at 1732383122159Running coprocessor pre-close hooks at 1732383122159Disabling compacts and flushes for region at 1732383122159Disabling writes for close at 1732383122159Writing region close event to WAL at 1732383122161 (+2 ms)Running coprocessor post-close hooks at 1732383122179 (+18 ms)Closed at 1732383122179 2024-11-23T17:32:02,180 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1732382970480.6f88863827b64acc564823a1044a6b2b. 
2024-11-23T17:32:02,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742413_1589 (size=5447) 2024-11-23T17:32:02,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742413_1589 (size=5447) 2024-11-23T17:32:02,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742413_1589 (size=5447) 2024-11-23T17:32:02,186 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=694 B at sequenceid=34 (bloomFilter=false), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/.tmp/l/699119f893d943c4a07c3fd7e3aca7d2 2024-11-23T17:32:02,191 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 699119f893d943c4a07c3fd7e3aca7d2 2024-11-23T17:32:02,192 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/.tmp/l/699119f893d943c4a07c3fd7e3aca7d2 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/l/699119f893d943c4a07c3fd7e3aca7d2 2024-11-23T17:32:02,194 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/info/e6aa492234e548ba917311e907584b8e is 173, key is testExportExpiredSnapshot,1,1732382970480.5e511dfc454574bea858d074620b30f9./info:regioninfo/1732382970918/Put/seqid=0 2024-11-23T17:32:02,196 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 699119f893d943c4a07c3fd7e3aca7d2 2024-11-23T17:32:02,196 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/l/699119f893d943c4a07c3fd7e3aca7d2, entries=7, sequenceid=34, filesize=5.3 K 2024-11-23T17:32:02,197 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~694 B/694, heapSize ~1.73 KB/1768, currentSize=0 B/0 for 41264850465b123187f03a78b2521b39 in 37ms, sequenceid=34, compaction requested=false 2024-11-23T17:32:02,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742414_1590 (size=15646) 2024-11-23T17:32:02,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742414_1590 (size=15646) 2024-11-23T17:32:02,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742414_1590 (size=15646) 
2024-11-23T17:32:02,205 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=72.69 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/info/e6aa492234e548ba917311e907584b8e 2024-11-23T17:32:02,205 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/acl/41264850465b123187f03a78b2521b39/recovered.edits/37.seqid, newMaxSeqId=37, maxSeqId=1 2024-11-23T17:32:02,205 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:02,206 INFO [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 2024-11-23T17:32:02,206 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 41264850465b123187f03a78b2521b39: Waiting for close lock at 1732383122160Running coprocessor pre-close hooks at 1732383122160Disabling compacts and flushes for region at 1732383122160Disabling writes for close at 1732383122160Obtaining lock to block concurrent updates at 1732383122160Preparing flush snapshotting stores in 41264850465b123187f03a78b2521b39 at 1732383122160Finished memstore snapshotting hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39., syncing WAL and waiting on mvcc, flushsize=dataSize=694, getHeapSize=1768, getOffHeapSize=0, getCellsCount=11 at 1732383122160Flushing stores of hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. at 1732383122161 (+1 ms)Flushing 41264850465b123187f03a78b2521b39/l: creating writer at 1732383122161Flushing 41264850465b123187f03a78b2521b39/l: appending metadata at 1732383122166 (+5 ms)Flushing 41264850465b123187f03a78b2521b39/l: closing flushed file at 1732383122166Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ec8522d: reopening flushed file at 1732383122191 (+25 ms)Finished flush of dataSize ~694 B/694, heapSize ~1.73 KB/1768, currentSize=0 B/0 for 41264850465b123187f03a78b2521b39 in 37ms, sequenceid=34, compaction requested=false at 1732383122197 (+6 ms)Writing region close event to WAL at 1732383122202 (+5 ms)Running coprocessor post-close hooks at 1732383122205 (+3 ms)Closed at 1732383122206 (+1 ms) 2024-11-23T17:32:02,206 DEBUG [RS_CLOSE_REGION-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1732382625522.41264850465b123187f03a78b2521b39. 
2024-11-23T17:32:02,212 INFO [regionserver/215bb12e999f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:32:02,224 INFO [regionserver/215bb12e999f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:32:02,227 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/ns/c34c95b1514047b49e47c8ef238b2425 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4./ns:/1732382968201/DeleteFamily/seqid=0 2024-11-23T17:32:02,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742415_1591 (size=8378) 2024-11-23T17:32:02,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742415_1591 (size=8378) 2024-11-23T17:32:02,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742415_1591 (size=8378) 2024-11-23T17:32:02,234 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/ns/c34c95b1514047b49e47c8ef238b2425 2024-11-23T17:32:02,240 INFO [regionserver/215bb12e999f:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:32:02,251 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/rep_barrier/d55f2b4356e64042b40fad845564b760 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4./rep_barrier:/1732382968201/DeleteFamily/seqid=0 2024-11-23T17:32:02,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742416_1592 (size=8717) 2024-11-23T17:32:02,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742416_1592 (size=8717) 2024-11-23T17:32:02,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742416_1592 (size=8717) 2024-11-23T17:32:02,256 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.95 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/rep_barrier/d55f2b4356e64042b40fad845564b760 2024-11-23T17:32:02,275 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/table/4babbadfefe84ac9a5d04e856371c0fe is 127, key is 
testtb-testExportFileSystemStateWithMergeRegion-1,,1732382945834.90b38de7d4454b3671fca0df1011d7b4./table:/1732382968201/DeleteFamily/seqid=0 2024-11-23T17:32:02,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742417_1593 (size=9531) 2024-11-23T17:32:02,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742417_1593 (size=9531) 2024-11-23T17:32:02,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742417_1593 (size=9531) 2024-11-23T17:32:02,280 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.27 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/table/4babbadfefe84ac9a5d04e856371c0fe 2024-11-23T17:32:02,284 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/info/e6aa492234e548ba917311e907584b8e as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/info/e6aa492234e548ba917311e907584b8e 2024-11-23T17:32:02,288 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/info/e6aa492234e548ba917311e907584b8e, entries=84, sequenceid=236, filesize=15.3 K 2024-11-23T17:32:02,289 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/ns/c34c95b1514047b49e47c8ef238b2425 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/ns/c34c95b1514047b49e47c8ef238b2425 2024-11-23T17:32:02,293 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/ns/c34c95b1514047b49e47c8ef238b2425, entries=28, sequenceid=236, filesize=8.2 K 2024-11-23T17:32:02,294 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/rep_barrier/d55f2b4356e64042b40fad845564b760 as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/rep_barrier/d55f2b4356e64042b40fad845564b760 2024-11-23T17:32:02,298 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/rep_barrier/d55f2b4356e64042b40fad845564b760, entries=26, sequenceid=236, filesize=8.5 K 2024-11-23T17:32:02,298 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/.tmp/table/4babbadfefe84ac9a5d04e856371c0fe as hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/table/4babbadfefe84ac9a5d04e856371c0fe 2024-11-23T17:32:02,302 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/table/4babbadfefe84ac9a5d04e856371c0fe, entries=43, sequenceid=236, filesize=9.3 K 2024-11-23T17:32:02,303 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~85.70 KB/87755, heapSize ~135.50 KB/138752, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=236, compaction requested=false 2024-11-23T17:32:02,307 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/data/hbase/meta/1588230740/recovered.edits/239.seqid, newMaxSeqId=239, maxSeqId=1 2024-11-23T17:32:02,307 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:02,307 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T17:32:02,308 INFO [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-23T17:32:02,308 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732383122161Running coprocessor pre-close hooks at 1732383122161Disabling compacts and flushes for region at 1732383122161Disabling writes for close at 1732383122161Obtaining lock to block concurrent updates at 1732383122161Preparing flush snapshotting stores in 1588230740 at 1732383122161Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=87755, getHeapSize=138752, getOffHeapSize=0, getCellsCount=663 at 1732383122162 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732383122163 (+1 ms)Flushing 1588230740/info: creating writer at 1732383122163Flushing 1588230740/info: appending metadata at 1732383122194 (+31 ms)Flushing 1588230740/info: closing flushed file at 1732383122194Flushing 1588230740/ns: creating writer at 1732383122210 (+16 ms)Flushing 1588230740/ns: appending metadata at 1732383122226 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732383122227 (+1 ms)Flushing 1588230740/rep_barrier: creating writer at 1732383122237 (+10 ms)Flushing 1588230740/rep_barrier: appending metadata at 1732383122251 (+14 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1732383122251Flushing 1588230740/table: creating writer at 1732383122259 (+8 ms)Flushing 1588230740/table: appending metadata at 1732383122275 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732383122275Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5947280e: reopening flushed file at 1732383122284 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48f1de5f: reopening flushed file at 1732383122288 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c13ffcd: reopening flushed file at 1732383122293 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59566e41: reopening flushed file at 1732383122298 (+5 ms)Finished flush of dataSize ~85.70 KB/87755, heapSize ~135.50 KB/138752, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=236, compaction requested=false at 1732383122303 (+5 ms)Writing region close event to WAL at 1732383122304 (+1 ms)Running coprocessor post-close hooks at 1732383122307 (+3 ms)Closed at 1732383122307 2024-11-23T17:32:02,308 DEBUG [RS_CLOSE_META-regionserver/215bb12e999f:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-23T17:32:02,360 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(976): stopping server 215bb12e999f,41065,1732382621000; all regions closed. 2024-11-23T17:32:02,360 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(976): stopping server 215bb12e999f,45467,1732382620632; all regions closed. 2024-11-23T17:32:02,361 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(976): stopping server 215bb12e999f,44667,1732382620918; all regions closed. 2024-11-23T17:32:02,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741836_1012 (size=99973) 2024-11-23T17:32:02,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741835_1011 (size=15467) 2024-11-23T17:32:02,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741836_1012 (size=99973) 2024-11-23T17:32:02,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741836_1012 (size=99973) 2024-11-23T17:32:02,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741833_1009 (size=11026) 2024-11-23T17:32:02,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741835_1011 (size=15467) 2024-11-23T17:32:02,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741833_1009 (size=11026) 2024-11-23T17:32:02,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741833_1009 (size=11026) 2024-11-23T17:32:02,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741835_1011 (size=15467) 2024-11-23T17:32:02,368 DEBUG [RS:0;215bb12e999f:45467 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/oldWALs 2024-11-23T17:32:02,368 DEBUG [RS:1;215bb12e999f:44667 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/oldWALs 2024-11-23T17:32:02,368 DEBUG [RS:2;215bb12e999f:41065 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to 
/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/oldWALs 2024-11-23T17:32:02,368 INFO [RS:2;215bb12e999f:41065 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 215bb12e999f%2C41065%2C1732382621000:(num 1732382624118) 2024-11-23T17:32:02,368 INFO [RS:0;215bb12e999f:45467 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 215bb12e999f%2C45467%2C1732382620632:(num 1732382624063) 2024-11-23T17:32:02,368 INFO [RS:1;215bb12e999f:44667 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 215bb12e999f%2C44667%2C1732382620918.meta:.meta(num 1732382624892) 2024-11-23T17:32:02,368 DEBUG [RS:0;215bb12e999f:45467 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:32:02,368 DEBUG [RS:2;215bb12e999f:41065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:32:02,368 INFO [RS:0;215bb12e999f:45467 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:32:02,368 INFO [RS:2;215bb12e999f:41065 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:32:02,368 INFO [RS:2;215bb12e999f:41065 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T17:32:02,368 INFO [RS:0;215bb12e999f:45467 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T17:32:02,368 INFO [RS:2;215bb12e999f:41065 {}] hbase.ChoreService(370): Chore service for: regionserver/215bb12e999f:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-23T17:32:02,368 INFO [RS:0;215bb12e999f:45467 {}] hbase.ChoreService(370): Chore service for: regionserver/215bb12e999f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-23T17:32:02,368 INFO [RS:2;215bb12e999f:41065 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T17:32:02,368 INFO [RS:0;215bb12e999f:45467 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-23T17:32:02,368 INFO [RS:2;215bb12e999f:41065 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T17:32:02,368 INFO [RS:0;215bb12e999f:45467 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-23T17:32:02,368 INFO [RS:2;215bb12e999f:41065 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T17:32:02,368 INFO [RS:0;215bb12e999f:45467 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-23T17:32:02,368 INFO [regionserver/215bb12e999f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-23T17:32:02,368 INFO [RS:2;215bb12e999f:41065 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T17:32:02,368 INFO [RS:0;215bb12e999f:45467 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T17:32:02,369 INFO [regionserver/215bb12e999f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T17:32:02,369 INFO [RS:2;215bb12e999f:41065 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41065 2024-11-23T17:32:02,369 INFO [RS:0;215bb12e999f:45467 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45467 2024-11-23T17:32:02,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073741834_1010 (size=17082) 2024-11-23T17:32:02,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073741834_1010 (size=17082) 2024-11-23T17:32:02,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073741834_1010 (size=17082) 2024-11-23T17:32:02,373 DEBUG [RS:1;215bb12e999f:44667 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/oldWALs 2024-11-23T17:32:02,373 INFO [RS:1;215bb12e999f:44667 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 215bb12e999f%2C44667%2C1732382620918:(num 1732382624079) 2024-11-23T17:32:02,373 DEBUG [RS:1;215bb12e999f:44667 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-23T17:32:02,373 INFO [RS:1;215bb12e999f:44667 {}] regionserver.LeaseManager(133): Closed leases 2024-11-23T17:32:02,373 INFO [RS:1;215bb12e999f:44667 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T17:32:02,373 INFO [RS:1;215bb12e999f:44667 {}] hbase.ChoreService(370): Chore service for: regionserver/215bb12e999f:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-23T17:32:02,373 INFO [RS:1;215bb12e999f:44667 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T17:32:02,373 INFO [regionserver/215bb12e999f:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-23T17:32:02,374 INFO [RS:1;215bb12e999f:44667 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44667 2024-11-23T17:32:02,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/215bb12e999f,41065,1732382621000 2024-11-23T17:32:02,385 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-23T17:32:02,386 INFO [RS:2;215bb12e999f:41065 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T17:32:02,398 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/215bb12e999f,45467,1732382620632 2024-11-23T17:32:02,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/215bb12e999f,44667,1732382620918 2024-11-23T17:32:02,398 INFO [RS:0;215bb12e999f:45467 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T17:32:02,398 INFO [RS:1;215bb12e999f:44667 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-23T17:32:02,398 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [215bb12e999f,41065,1732382621000] 2024-11-23T17:32:02,419 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/215bb12e999f,41065,1732382621000 already deleted, retry=false 2024-11-23T17:32:02,419 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 215bb12e999f,41065,1732382621000 expired; onlineServers=2 2024-11-23T17:32:02,419 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [215bb12e999f,44667,1732382620918] 2024-11-23T17:32:02,429 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/215bb12e999f,44667,1732382620918 already deleted, retry=false 2024-11-23T17:32:02,429 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 215bb12e999f,44667,1732382620918 expired; onlineServers=1 2024-11-23T17:32:02,429 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [215bb12e999f,45467,1732382620632] 2024-11-23T17:32:02,440 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/215bb12e999f,45467,1732382620632 already deleted, retry=false 2024-11-23T17:32:02,440 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 215bb12e999f,45467,1732382620632 expired; onlineServers=0 2024-11-23T17:32:02,440 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '215bb12e999f,39621,1732382619205' ***** 2024-11-23T17:32:02,440 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-23T17:32:02,440 INFO [M:0;215bb12e999f:39621 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-23T17:32:02,440 INFO [M:0;215bb12e999f:39621 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-23T17:32:02,440 DEBUG [M:0;215bb12e999f:39621 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-23T17:32:02,441 DEBUG [M:0;215bb12e999f:39621 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-23T17:32:02,441 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-23T17:32:02,441 DEBUG [master/215bb12e999f:0:becomeActiveMaster-HFileCleaner.large.0-1732382623430 {}] cleaner.HFileCleaner(306): Exit Thread[master/215bb12e999f:0:becomeActiveMaster-HFileCleaner.large.0-1732382623430,5,FailOnTimeoutGroup] 2024-11-23T17:32:02,441 DEBUG [master/215bb12e999f:0:becomeActiveMaster-HFileCleaner.small.0-1732382623431 {}] cleaner.HFileCleaner(306): Exit Thread[master/215bb12e999f:0:becomeActiveMaster-HFileCleaner.small.0-1732382623431,5,FailOnTimeoutGroup] 2024-11-23T17:32:02,441 INFO [M:0;215bb12e999f:39621 {}] hbase.ChoreService(370): Chore service for: master/215bb12e999f:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-23T17:32:02,441 INFO [M:0;215bb12e999f:39621 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-23T17:32:02,441 DEBUG [M:0;215bb12e999f:39621 {}] master.HMaster(1795): Stopping service threads 2024-11-23T17:32:02,441 INFO [M:0;215bb12e999f:39621 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-23T17:32:02,441 INFO [M:0;215bb12e999f:39621 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-23T17:32:02,442 INFO [M:0;215bb12e999f:39621 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-23T17:32:02,442 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-23T17:32:02,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-23T17:32:02,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-23T17:32:02,451 DEBUG [M:0;215bb12e999f:39621 {}] zookeeper.ZKUtil(347): master:39621-0x10168b8ee290000, quorum=127.0.0.1:57294, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-23T17:32:02,451 WARN [M:0;215bb12e999f:39621 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-23T17:32:02,452 INFO [M:0;215bb12e999f:39621 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/.lastflushedseqids 2024-11-23T17:32:02,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44591 is added to blk_1073742418_1594 (size=302) 2024-11-23T17:32:02,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45743 is added to blk_1073742418_1594 (size=302) 2024-11-23T17:32:02,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46761 is added to blk_1073742418_1594 (size=302) 2024-11-23T17:32:02,463 INFO [M:0;215bb12e999f:39621 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-23T17:32:02,463 INFO [M:0;215bb12e999f:39621 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-23T17:32:02,463 DEBUG [M:0;215bb12e999f:39621 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-23T17:32:02,476 INFO [M:0;215bb12e999f:39621 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:32:02,476 DEBUG [M:0;215bb12e999f:39621 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-23T17:32:02,476 DEBUG [M:0;215bb12e999f:39621 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-23T17:32:02,476 DEBUG [M:0;215bb12e999f:39621 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-23T17:32:02,476 INFO [M:0;215bb12e999f:39621 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=980.05 KB heapSize=1.15 MB 2024-11-23T17:32:02,477 ERROR [AsyncFSWAL-0-hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData-prefix:215bb12e999f,39621,1732382619205 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData-prefix:215bb12e999f,39621,1732382619205,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-23T17:32:02,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T17:32:02,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41065-0x10168b8ee290003, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T17:32:02,509 INFO [RS:2;215bb12e999f:41065 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T17:32:02,509 INFO [RS:2;215bb12e999f:41065 {}] regionserver.HRegionServer(1031): Exiting; stopping=215bb12e999f,41065,1732382621000; zookeeper connection closed. 2024-11-23T17:32:02,510 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@575f020e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@575f020e 2024-11-23T17:32:02,519 INFO [RS:1;215bb12e999f:44667 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T17:32:02,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T17:32:02,519 INFO [RS:1;215bb12e999f:44667 {}] regionserver.HRegionServer(1031): Exiting; stopping=215bb12e999f,44667,1732382620918; zookeeper connection closed. 
2024-11-23T17:32:02,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44667-0x10168b8ee290002, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T17:32:02,519 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T17:32:02,519 INFO [RS:0;215bb12e999f:45467 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-23T17:32:02,519 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@502f1e3b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@502f1e3b 2024-11-23T17:32:02,519 INFO [RS:0;215bb12e999f:45467 {}] regionserver.HRegionServer(1031): Exiting; stopping=215bb12e999f,45467,1732382620632; zookeeper connection closed. 2024-11-23T17:32:02,519 DEBUG [pool-63-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45467-0x10168b8ee290001, quorum=127.0.0.1:57294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-23T17:32:02,520 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@66020e73 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@66020e73 2024-11-23T17:32:02,520 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-23T17:32:07,691 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:32:08,093 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:32:09,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:09,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-23T17:32:09,806 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-23T17:32:09,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-23T17:32:09,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-23T17:32:09,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:09,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-23T17:32:09,807 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-23T17:32:15,310 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-23T17:32:38,093 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;215bb12e999f:39621 237 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 20 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@77f88901 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 20 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 32 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e2d3eee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5727 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 58 Waiting on java.util.concurrent.CountDownLatch$Sync@5e7d3c29 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12055 Waited count: 12799 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@164f582b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@71f2ce42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1139 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 115 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@661eade8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:44391}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 34 Waited count: 3114 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30b805bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37605): State: TIMED_WAITING Blocked count: 1 Waited 
count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 191 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 55038 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15ce9c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37605): State: TIMED_WAITING Blocked count: 104 Waited count: 2496 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37605): State: TIMED_WAITING Blocked count: 106 Waited count: 2507 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37605): State: TIMED_WAITING Blocked count: 100 Waited count: 2513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37605): State: TIMED_WAITING Blocked count: 79 Waited count: 2506 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37605): State: TIMED_WAITING Blocked count: 86 Waited count: 2514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 285 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(431946690)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp503631082-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp503631082-88-acceptor-0@406e3e45-ServerConnector@6106ba1a{HTTP/1.1, (http/1.1)}{localhost:44969}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp503631082-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp503631082-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-f5080fd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@283d96cd): State: TIMED_WAITING Blocked count: 1 Waited count: 1133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44097): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 338 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76d7e138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1402 Waited count: 1606 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@32dcf04c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 571 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 112 (IPC Client (1549690208) connection to localhost/127.0.0.1:37605 from jenkins): State: TIMED_WAITING Blocked count: 1517 Waited count: 1517 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 0 Waited count: 2123 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp501822267-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp501822267-122-acceptor-0@5c07940c-ServerConnector@6d8e07b1{HTTP/1.1, (http/1.1)}{localhost:44921}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp501822267-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp501822267-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-347aa587-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74abb991): State: TIMED_WAITING Blocked count: 0 Waited count: 1132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34763): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 254 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67de2944 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1396 Waited count: 1606 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@325017cd): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 569 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp277826389-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp277826389-154-acceptor-0@6bc9af82-ServerConnector@ce7b4c8{HTTP/1.1, (http/1.1)}{localhost:44837}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp277826389-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp277826389-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-3082c1fc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@10eab4c5): State: TIMED_WAITING Blocked count: 1 Waited count: 1131 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 33341): State: TIMED_WAITING Blocked count: 1 Waited count: 58 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 341 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15db0682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1368 Waited count: 1585 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3a02215f): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 570 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@607ffc2e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@3d8ea566[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@336b3bb6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57294): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 283 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 410 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@128992d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 
cport:57294):): State: WAITING Blocked count: 1 Waited count: 513 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b3975e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 545 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68c31e9c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 245 (LeaseRenewer:jenkins@localhost:37605): State: TIMED_WAITING Blocked count: 16 Waited count: 588 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7d75afd2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 48 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57294)): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4de4bd09 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 18 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4a25b4ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 98 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 
(NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 185 Waited count: 690 Waiting on java.util.concurrent.Semaphore$NonfairSync@1a7e8913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c65ac18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621): State: WAITING Blocked count: 59 Waited count: 337 Waiting on java.util.concurrent.Semaphore$NonfairSync@54814268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621): State: WAITING Blocked count: 76 Waited count: 12599 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3366ac26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39621): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5dbe8d48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5dbe8d48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c2aff8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@12e66664 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@14e7ff82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3d1ae49b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26d79214 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 126 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;215bb12e999f:39621): State: TIMED_WAITING Blocked count: 12 Waited count: 5001 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1084/0x00007f3f04f9f000.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 57 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@232a855c): State: TIMED_WAITING Blocked count: 0 Waited count: 188 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5574 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 91 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 104 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 157 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 56 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55694 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 64 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7022b069 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 
(regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9ae2ddb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 45 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e1803b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d290dea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 503 (LeaseRenewer:jenkins.hfs.0@localhost:37605): State: TIMED_WAITING Blocked count: 16 Waited count: 585 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 506 (LeaseRenewer:jenkins.hfs.1@localhost:37605): 
State: TIMED_WAITING Blocked count: 16 Waited count: 583 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 514 (LeaseRenewer:jenkins.hfs.2@localhost:37605): State: TIMED_WAITING Blocked count: 16 Waited count: 584 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 13 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55489 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 536 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 
(RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 560 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 919 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 561 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 166 Waiting on java.util.concurrent.ForkJoinPool@10d57dfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 967 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1051 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 
(MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 72 Waited count: 111 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@583bcf84 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1143 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1208 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1209 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1210 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1264 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1265 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1266 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1268 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1626 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@5a7a32e8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1755 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1758 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2583 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 618 Waiting on java.util.concurrent.ForkJoinPool@10d57dfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3757 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5987 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5988 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8170 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 80 Waiting on java.util.concurrent.ForkJoinPool@10d57dfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10135 (AsyncFSWAL-1-hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData-prefix:215bb12e999f,39621,1732382619205): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61fb96cf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10139 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10140 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-23T17:33:08,093 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:33:38,093 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
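The repeated "NoSuchFieldException: threadGroup" DEBUG lines above come from the test utility's periodic fixer reflectively looking up a field that newer Hadoop releases no longer declare (see HBASE-27595); the exception is caught and logged rather than failing the test. The snippet below is only an illustrative sketch of that failure mode, not the actual HBaseTestingUtil code: it deliberately probes java.lang.Thread for a field name it does not declare so the catch branch runs, and the class and field names are stand-ins chosen for the example.

import java.lang.reflect.Field;

public class ThreadGroupFieldProbe {
    public static void main(String[] args) {
        // Hypothetical probe: java.lang.Thread stores its group in a field named
        // "group", so asking for "threadGroup" throws NoSuchFieldException -- the
        // same situation the fixer hits on Hadoop versions newer than 3.2.3/3.3.4.
        try {
            Field f = Thread.class.getDeclaredField("threadGroup");
            f.setAccessible(true);
            System.out.println("found field: " + f);
        } catch (NoSuchFieldException e) {
            // Mirror the DEBUG line: log the missing field and skip the fix-up
            // instead of propagating the error.
            System.out.println("NoSuchFieldException: " + e.getMessage()
                + "; field not present in this version, skipping fix");
        }
    }
}

Run as-is, this prints the NoSuchFieldException branch, which is the benign outcome the log entries above are reporting every 30 seconds.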
2024-11-23T17:33:41,264 DEBUG [master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=25, reuseRatio=71.43% 2024-11-23T17:33:41,269 DEBUG [master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-23T17:33:50,506 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;215bb12e999f:39621 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 20 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@77f88901 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 20 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 30 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e2d3eee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6327 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 64 Waiting on java.util.concurrent.CountDownLatch$Sync@3783ceba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12055 Waited count: 12800 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@164f582b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@71f2ce42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1259 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 127 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@661eade8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:44391}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 34 Waited count: 3114 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30b805bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37605): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 211 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 
Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 211 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 60987 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15ce9c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37605): State: TIMED_WAITING Blocked count: 104 Waited count: 2557 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37605): State: TIMED_WAITING Blocked count: 106 Waited count: 2568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37605): State: TIMED_WAITING Blocked count: 100 Waited count: 2573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37605): State: TIMED_WAITING Blocked count: 79 Waited count: 2566 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37605): State: TIMED_WAITING Blocked count: 86 Waited count: 2575 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 315 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(431946690)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp503631082-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp503631082-88-acceptor-0@406e3e45-ServerConnector@6106ba1a{HTTP/1.1, (http/1.1)}{localhost:44969}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp503631082-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp503631082-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-f5080fd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@283d96cd): State: TIMED_WAITING Blocked count: 1 Waited count: 1253 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44097): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76d7e138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1422 Waited count: 1646 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@32dcf04c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 631 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 112 (IPC Client (1549690208) connection to localhost/127.0.0.1:37605 from jenkins): State: TIMED_WAITING Blocked count: 1577 Waited count: 1577 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 0 Waited count: 2183 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp501822267-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp501822267-122-acceptor-0@5c07940c-ServerConnector@6d8e07b1{HTTP/1.1, (http/1.1)}{localhost:44921}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp501822267-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp501822267-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-347aa587-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74abb991): State: TIMED_WAITING Blocked count: 0 Waited count: 1252 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34763): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 274 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67de2944 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1416 Waited count: 1646 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@325017cd): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp277826389-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp277826389-154-acceptor-0@6bc9af82-ServerConnector@ce7b4c8{HTTP/1.1, (http/1.1)}{localhost:44837}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp277826389-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp277826389-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-3082c1fc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@10eab4c5): State: TIMED_WAITING Blocked count: 1 Waited count: 1251 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 33341): State: TIMED_WAITING Blocked count: 1 Waited count: 64 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 126 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 361 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15db0682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1388 Waited count: 1625 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3a02215f): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 641 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 630 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42cd358e Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b2b2389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@607ffc2e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@3d8ea566[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7782937e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@336b3bb6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57294): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 313 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 415 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@128992d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57294):): State: WAITING Blocked count: 1 Waited count: 518 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b3975e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 550 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68c31e9c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7d75afd2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 539 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 48 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57294)): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4de4bd09 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 18 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4a25b4ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 98 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 
(NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 185 Waited count: 690 Waiting on java.util.concurrent.Semaphore$NonfairSync@1a7e8913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c65ac18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621): State: WAITING Blocked count: 59 Waited count: 337 Waiting on java.util.concurrent.Semaphore$NonfairSync@54814268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621): State: WAITING Blocked count: 76 Waited count: 12599 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3366ac26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39621): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5dbe8d48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5dbe8d48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c2aff8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@12e66664 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@14e7ff82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3d1ae49b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26d79214 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 126 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;215bb12e999f:39621): State: TIMED_WAITING Blocked count: 12 Waited count: 5001 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1084/0x00007f3f04f9f000.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 63 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@232a855c): State: TIMED_WAITING Blocked count: 0 Waited count: 208 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6174 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 91 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 104 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 174 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c1a9bbe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 62 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61695 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 64 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7022b069 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9ae2ddb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 45 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e1803b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d290dea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 13 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 61492 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 536 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 166 Waiting on java.util.concurrent.ForkJoinPool@10d57dfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 973 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1051 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 72 Waited count: 111 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@583bcf84 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1143 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1208 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1209 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1210 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1264 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1265 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1266 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1268 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1626 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@5a7a32e8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1755 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1758 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2583 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 619 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 3757 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5987 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5988 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8170 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 80 Waiting on java.util.concurrent.ForkJoinPool@10d57dfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10135 (AsyncFSWAL-1-hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData-prefix:215bb12e999f,39621,1732382619205): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61fb96cf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10140 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10144 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-23T17:34:08,094 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:34:38,094 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;215bb12e999f:39621 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 20 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@77f88901 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 20 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 38 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e2d3eee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6926 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 70 Waiting on java.util.concurrent.CountDownLatch$Sync@170673d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12055 Waited count: 12801 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@164f582b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@71f2ce42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1379 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 139 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@661eade8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:44391}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 34 Waited count: 3114 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30b805bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37605): State: TIMED_WAITING Blocked count: 1 Waited 
count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 231 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 231 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 66939 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15ce9c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37605): State: TIMED_WAITING Blocked count: 104 Waited count: 2618 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37605): State: TIMED_WAITING Blocked count: 106 Waited count: 2629 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37605): State: TIMED_WAITING Blocked count: 100 Waited count: 2635 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37605): State: TIMED_WAITING Blocked count: 79 Waited count: 2627 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37605): State: TIMED_WAITING Blocked count: 86 Waited count: 2636 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 345 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(431946690)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp503631082-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp503631082-88-acceptor-0@406e3e45-ServerConnector@6106ba1a{HTTP/1.1, (http/1.1)}{localhost:44969}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp503631082-89): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp503631082-90): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-f5080fd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@283d96cd): State: TIMED_WAITING Blocked count: 1 Waited count: 1373 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44097): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 378 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76d7e138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1442 Waited count: 1686 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@32dcf04c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 690 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 693 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 691 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 688 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 112 (IPC Client (1549690208) connection to localhost/127.0.0.1:37605 from jenkins): State: TIMED_WAITING Blocked count: 1637 Waited count: 1637 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 0 Waited count: 2243 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp501822267-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp501822267-122-acceptor-0@5c07940c-ServerConnector@6d8e07b1{HTTP/1.1, (http/1.1)}{localhost:44921}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp501822267-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp501822267-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-347aa587-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74abb991): State: TIMED_WAITING Blocked count: 0 Waited count: 1373 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34763): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 294 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67de2944 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1436 Waited count: 1686 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@325017cd): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 688 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp277826389-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp277826389-154-acceptor-0@6bc9af82-ServerConnector@ce7b4c8{HTTP/1.1, (http/1.1)}{localhost:44837}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp277826389-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp277826389-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-3082c1fc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@10eab4c5): State: TIMED_WAITING Blocked count: 1 Waited count: 1371 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 33341): State: TIMED_WAITING Blocked count: 1 Waited count: 70 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15db0682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1408 Waited count: 1665 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3a02215f): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 693 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 690 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 706 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 687 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42cd358e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b2b2389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@607ffc2e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@3d8ea566[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7782937e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@336b3bb6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57294): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 70 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 343 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 419 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@128992d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57294):): State: WAITING Blocked count: 1 Waited count: 522 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b3975e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 554 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68c31e9c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7d75afd2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 567 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 48 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57294)): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4de4bd09 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 18 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4a25b4ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 185 Waited count: 690 Waiting on java.util.concurrent.Semaphore$NonfairSync@1a7e8913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c65ac18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621): State: WAITING Blocked count: 59 Waited count: 337 Waiting on java.util.concurrent.Semaphore$NonfairSync@54814268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621): State: WAITING Blocked count: 76 Waited count: 12599 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3366ac26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39621): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5dbe8d48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5dbe8d48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c2aff8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@12e66664 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39621): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@14e7ff82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3d1ae49b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26d79214 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 126 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;215bb12e999f:39621): State: TIMED_WAITING Blocked count: 12 Waited count: 5001 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1084/0x00007f3f04f9f000.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 69 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@232a855c): State: TIMED_WAITING Blocked count: 0 Waited count: 228 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 6773 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 91 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 104 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 174 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c1a9bbe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67698 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 64 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7022b069 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9ae2ddb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 45 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e1803b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d290dea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 13 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 67494 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 536 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 166 Waiting on java.util.concurrent.ForkJoinPool@10d57dfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1051 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 72 Waited count: 111 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@583bcf84 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1143 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1208 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1209 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1210 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1264 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1265 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1266 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1268 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1626 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@5a7a32e8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1755 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1758 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3757 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5987 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5988 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8170 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10135 (AsyncFSWAL-1-hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData-prefix:215bb12e999f,39621,1732382619205): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61fb96cf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10140 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10144 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-23T17:35:08,094 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-23T17:35:38,094 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;215bb12e999f:39621 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 20 Waited count: 20 Waiting on java.lang.ref.ReferenceQueue$Lock@77f88901 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 20 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@e2d3eee Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 76 Waiting on java.util.concurrent.CountDownLatch$Sync@140973d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12055 Waited count: 12802 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 15 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@164f582b Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@71f2ce42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220): State: TIMED_WAITING Blocked count: 0 Waited count: 1499 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 151 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp2080160363-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp2080160363-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp2080160363-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp2080160363-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp2080160363-41-acceptor-0@661eade8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:44391}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp2080160363-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp2080160363-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp2080160363-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-543f2336-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 34 Waited count: 3114 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@30b805bd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 37605): State: TIMED_WAITING Blocked count: 1 Waited 
count: 76 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 251 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 251 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 72890 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1381 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15ce9c05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 37605): State: TIMED_WAITING Blocked count: 104 Waited count: 2679 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 37605): State: TIMED_WAITING Blocked count: 106 Waited count: 2690 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 37605): State: TIMED_WAITING Blocked count: 100 Waited count: 2696 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 37605): State: TIMED_WAITING Blocked count: 79 Waited count: 2689 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 37605): State: TIMED_WAITING Blocked count: 86 Waited count: 2697 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c): State: TIMED_WAITING Blocked count: 0 Waited count: 375 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(431946690)): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp503631082-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp503631082-88-acceptor-0@406e3e45-ServerConnector@6106ba1a{HTTP/1.1, (http/1.1)}{localhost:44969}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp503631082-89): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp503631082-90): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-f5080fd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@283d96cd): State: TIMED_WAITING Blocked count: 1 Waited count: 1493 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 44097): State: TIMED_WAITING Blocked count: 1 Waited count: 76 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 398 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76d7e138 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1462 Waited count: 1726 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@32dcf04c): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) 
Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 753 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 751 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 44097): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 112 (IPC Client (1549690208) connection to localhost/127.0.0.1:37605 from jenkins): State: TIMED_WAITING Blocked count: 1697 Waited count: 1697 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 113 (IPC Parameter Sending Thread for localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 0 Waited count: 2303 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp501822267-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 
(qtp501822267-122-acceptor-0@5c07940c-ServerConnector@6d8e07b1{HTTP/1.1, (http/1.1)}{localhost:44921}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp501822267-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp501822267-124): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-347aa587-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74abb991): State: TIMED_WAITING Blocked count: 0 Waited count: 1493 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 34763): State: TIMED_WAITING Blocked count: 1 Waited count: 76 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 314 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67de2944 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1456 Waited count: 1726 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@325017cd): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 749 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 748 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 749 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 747 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 34763): State: TIMED_WAITING Blocked count: 0 Waited count: 747 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp277826389-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp277826389-154-acceptor-0@6bc9af82-ServerConnector@ce7b4c8{HTTP/1.1, (http/1.1)}{localhost:44837}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp277826389-155): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp277826389-156): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-3082c1fc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@10eab4c5): State: TIMED_WAITING Blocked count: 1 Waited count: 1491 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 33341): State: TIMED_WAITING Blocked count: 1 Waited count: 76 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 401 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15db0682 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605): State: TIMED_WAITING Blocked count: 1428 Waited count: 1705 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3a02215f): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 780 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 753 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 772 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 33341): State: TIMED_WAITING Blocked count: 0 Waited count: 747 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 188 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 197 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@42cd358e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 211 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b2b2389 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 215 (java.util.concurrent.ThreadPoolExecutor$Worker@607ffc2e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@3d8ea566[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/current/BP-931987288-172.17.0.2-1732382611165): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7782937e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@336b3bb6[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57294): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 76 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 373 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 7 Waited count: 423 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@128992d7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:57294):): State: WAITING Blocked count: 1 Waited count: 526 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@b3975e6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 558 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68c31e9c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7d75afd2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 595 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 48 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:57294)): State: RUNNABLE Blocked count: 19 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 13 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4de4bd09 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 18 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4a25b4ba Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@23b49e59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 185 Waited count: 690 Waiting on java.util.concurrent.Semaphore$NonfairSync@1a7e8913 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c65ac18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621): State: WAITING Blocked count: 59 Waited count: 337 Waiting on java.util.concurrent.Semaphore$NonfairSync@54814268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621): State: WAITING Blocked count: 76 Waited count: 12599 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3366ac26 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39621): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5dbe8d48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5dbe8d48 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c2aff8b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@12e66664 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39621): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@14e7ff82 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39621): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@3d1ae49b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26d79214 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 311 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 333 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 126 Waited count: 6 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;215bb12e999f:39621): State: TIMED_WAITING Blocked count: 12 Waited count: 5001 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1084/0x00007f3f04f9f000.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) 
app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 356 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 75 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 358 (master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 360 (master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 362 (org.apache.hadoop.hdfs.PeerCache@232a855c): State: TIMED_WAITING Blocked count: 0 Waited count: 248 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 
Waited count: 7373 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 397 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 91 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 398 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 104 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 174 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5c1a9bbe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 423 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 411 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 73700 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 432 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 64 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 433 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 42 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 458 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7022b069 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 27 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9ae2ddb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 481 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 22 Waited count: 45 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1e1803b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 480 (regionserver/215bb12e999f:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@d290dea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 528 (region-location-0): State: WAITING Blocked count: 13 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 410 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 73497 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 536 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 561 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 167 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 583 (region-location-1): State: WAITING Blocked count: 3 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 987 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 985 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1051 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 72 Waited count: 111 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@583bcf84 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1143 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1144 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1208 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1209 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 1210 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1264 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1265 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1266 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1268 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1626 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@5a7a32e8 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1755 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1758 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@686d45de Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3757 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5987 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5988 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10135 (AsyncFSWAL-1-hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData-prefix:215bb12e999f,39621,1732382619205): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@61fb96cf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10144 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-23T17:36:08,094 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-23T17:36:28,693 DEBUG [Time-limited test {}] hbase.LocalHBaseCluster(398): Interrupted
java.lang.InterruptedException: null
    at java.lang.Object.wait(Native Method) ~[?:?]
    at java.lang.Thread.join(Thread.java:1307) ~[?:?]
    at org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:111) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) ~[classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) ~[test-classes/:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-23T17:36:28,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2da75756{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T17:36:28,696 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@ce7b4c8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T17:36:28,696 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T17:36:28,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15a0cd2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T17:36:28,696 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20aa1485{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,STOPPED}
2024-11-23T17:36:28,699 WARN [BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-23T17:36:28,699 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-23T17:36:28,699 WARN [BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-931987288-172.17.0.2-1732382611165 (Datanode Uuid a3f5f17c-2fc0-43ec-b0ce-1f425654c4a0) service to localhost/127.0.0.1:37605
2024-11-23T17:36:28,699 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-23T17:36:28,701 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5/current/BP-931987288-172.17.0.2-1732382611165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:36:28,701 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/current/BP-931987288-172.17.0.2-1732382611165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:36:28,702 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
====> TEST TIMED OUT. PRINTING THREAD DUMP.
<==== Timestamp: 2024-11-23 05:36:28,697 "NIOWorkerThread-7" daemon prio=5 tid=265 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@70589220" daemon prio=5 tid=34 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner" daemon prio=5 tid=23 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Socket Reader #1 for port 0" daemon prio=5 tid=95 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "MiniHBaseClusterRegionServer-EventLoopGroup-3-1" daemon prio=10 tid=289 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp2080160363-43" daemon prio=5 tid=43 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Session-HouseKeeper-f5080fd-1" prio=5 tid=91 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server listener on 0" daemon prio=5 tid=94 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "qtp501822267-122-acceptor-0@5c07940c-ServerConnector@6d8e07b1{HTTP/1.1, (http/1.1)}{localhost:44921}" daemon prio=3 tid=122 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "AsyncFSWAL-1-hdfs://localhost:37605/user/jenkins/test-data/78f33c35-b526-0410-d006-67de123947bf/MasterData-prefix:215bb12e999f,39621,1732382619205" daemon prio=5 tid=10135 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=132 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 33341" daemon prio=5 tid=171 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@e3b949c" daemon prio=5 tid=71 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-6" daemon prio=5 tid=1144 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-12" daemon prio=5 tid=1266 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-20-thread-1" prio=5 tid=103 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-0" daemon prio=5 tid=528 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 44097" daemon prio=5 tid=104 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Time-limited test" daemon prio=5 tid=22 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at app//org.apache.hadoop.util.concurrent.HadoopExecutors.shutdown(HadoopExecutors.java:116) at app//org.apache.hadoop.hdfs.server.datanode.DataNode.shutdown(DataNode.java:2597) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNode(MiniDFSCluster.java:2232) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdownDataNodes(MiniDFSCluster.java:2222) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2201) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2174) at app//org.apache.hadoop.hdfs.MiniDFSCluster.shutdown(MiniDFSCluster.java:2167) at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniDFSCluster(HBaseTestingUtil.java:761) at app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1021) at app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) at app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at app//org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39621" daemon prio=5 tid=284 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "IPC Server handler 2 on default port 37605" daemon prio=5 tid=66 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "qtp2080160363-42" daemon prio=5 tid=42 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605" daemon prio=5 tid=102 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "java.util.concurrent.ThreadPoolExecutor$Worker@3d8ea566[State = -1, empty queue]" daemon prio=5 tid=218 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Socket Reader #1 for port 0" daemon prio=5 tid=161 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "qtp503631082-88-acceptor-0@406e3e45-ServerConnector@6106ba1a{HTTP/1.1, (http/1.1)}{localhost:44969}" daemon prio=3 tid=88 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3)" daemon prio=5 tid=186 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "IPC Server handler 4 on default port 44097" daemon prio=5 tid=108 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server handler 4 on default port 33341" daemon prio=5 tid=174 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "nioEventLoopGroup-7-3" prio=10 tid=10152 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "M:0;215bb12e999f:39621" daemon prio=5 tid=286 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) at app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1084/0x00007f3f04f9f000.run(Unknown Source) at app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) at app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) at app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) at app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) at app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) at app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) at app//org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) at app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) at app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) at app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-16" daemon prio=5 tid=5988 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100" daemon prio=5 tid=35 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) "Async-Client-Retry-Timer-pool-0" daemon prio=5 tid=410 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39621" daemon prio=5 tid=279 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-23T17:36:28,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@531e55ae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
"IPC Client (1549690208) connection to localhost/127.0.0.1:37605 from jenkins" daemon prio=5 tid=112 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) at app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) "Command processor" daemon prio=5 tid=135 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "MiniHBaseClusterRegionServer-EventLoopGroup-4-3" daemon prio=10 tid=527 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=164 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39621" daemon prio=5 tid=282 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "regionserver/215bb12e999f:0.procedureResultReporter" daemon prio=5 tid=480 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "RPCClient-NioEventLoopGroup-6-4" daemon prio=5 tid=1051 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-13" daemon prio=5 tid=1268 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-23T17:36:28,704 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6d8e07b1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-23T17:36:28,704 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
"HMaster-EventLoopGroup-1-2" daemon prio=10 tid=432 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "CacheReplicationMonitor(431946690)" daemon prio=5 tid=75 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) at app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) "surefire-forkedjvm-command-thread" daemon prio=5 tid=18 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) at java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) at app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) at java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) at java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) at java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) at app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) at app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) at app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) at app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) at app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) at
app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-6-2" prio=10 tid=10148 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-15-thread-1" daemon prio=5 tid=211 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 34763" daemon prio=5 tid=139 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39621" daemon prio=5 tid=285 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "HMaster-EventLoopGroup-1-1" daemon prio=10 tid=256 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
2024-11-23T17:36:28,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6583bccc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 44097" daemon prio=5 tid=106 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "org.apache.hadoop.hdfs.PeerCache@232a855c" daemon prio=5 tid=362 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) at app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) at app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-23-thread-1" daemon prio=5 tid=207 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 33341" daemon prio=5 tid=173 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data5/current/BP-931987288-172.17.0.2-1732382611165" daemon prio=5 tid=225 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:57294" daemon prio=5 tid=238 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) "RedundancyMonitor" daemon prio=5 tid=47 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) at java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2661f6c3" daemon prio=5 tid=72 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp503631082-89" daemon prio=5 tid=89 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp501822267-123" daemon prio=5 tid=123 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp2080160363-38" daemon prio=5 tid=38 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
2024-11-23T17:36:28,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@339990ad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,STOPPED}
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 34763" daemon prio=5 tid=138 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Idle-Rpc-Conn-Sweeper-pool-0" daemon prio=5 tid=412 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test-EventThread" daemon prio=5 tid=258 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) "Finalizer" daemon prio=8 tid=3 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) "NIOWorkerThread-1" daemon prio=5 tid=243 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SessionTracker" daemon prio=5 tid=239 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) "NIOWorkerThread-4" daemon prio=5 tid=261 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 33341" daemon prio=5 tid=162 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "nioEventLoopGroup-7-2" prio=10 tid=10151 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-23T17:36:28,706 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
"NIOWorkerThread-15" daemon prio=5 tid=273 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2/current/BP-931987288-172.17.0.2-1732382611165" daemon prio=5 tid=198 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "HMaster-EventLoopGroup-1-3" daemon prio=10 tid=433 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 37605" daemon prio=5 tid=56 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data2)" daemon prio=5 tid=187 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "RPCClient-NioEventLoopGroup-6-11" daemon prio=5 tid=1265 runnable 
java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-4-1" daemon prio=10 tid=311 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "surefire-forkedjvm-stream-flusher" daemon prio=5 tid=16 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-10" daemon prio=5 tid=1264 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
2024-11-23T17:36:28,706 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39621" daemon prio=5 tid=281 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "RPCClient-NioEventLoopGroup-6-1" daemon prio=5 tid=536 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 34763" daemon prio=5 tid=142 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "IPC Server listener on 0" daemon prio=5 tid=160 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) "SSL Certificates Store Monitor" daemon prio=5 tid=25 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "qtp501822267-121" daemon prio=5 tid=121 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-1-thread-1" daemon prio=5 tid=14 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-6-thread-1" prio=5 tid=36 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Command processor" daemon prio=5 tid=167 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "Block report processor" daemon prio=5 tid=51 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) 
"refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3/current/BP-931987288-172.17.0.2-1732382611165" daemon prio=5 tid=199 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-5" daemon prio=5 tid=263 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-3" daemon prio=5 tid=546 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOServerCxnFactory.SelectorThread-0" daemon prio=5 tid=236 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "MiniHBaseClusterRegionServer-EventLoopGroup-4-2" daemon 
prio=10 tid=517 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-2-1" prio=10 tid=92 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-23T17:36:28,706 WARN [BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-23T17:36:28,707 WARN [BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-931987288-172.17.0.2-1732382611165 (Datanode Uuid 751a80af-2f15-4df2-a58c-7825bba66825) service to localhost/127.0.0.1:37605
"MiniHBaseClusterRegionServer-EventLoopGroup-3-2" daemon prio=10 tid=1081 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 4 on default port 37605" 
daemon prio=5 tid=68 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1/current/BP-931987288-172.17.0.2-1732382611165" daemon prio=5 tid=200 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SyncThread:0" daemon prio=5 tid=240 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) "IPC Server handler 2 on default port 33341" daemon prio=5 tid=172 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "region-location-3" daemon prio=5 tid=1755 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39621" daemon prio=5 tid=277 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "pool-26-thread-1" prio=5 tid=120 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-13" daemon prio=5 tid=271 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Parameter Sending Thread for localhost/127.0.0.1:37605" daemon 
prio=5 tid=113 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) at java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) at app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=358 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-3-3" daemon prio=10 tid=1145 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-12" daemon prio=5 tid=270 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-6-3" prio=10 tid=10149 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.util.JvmPauseMonitor$Monitor@74abb991" daemon prio=5 tid=127 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/current/BP-931987288-172.17.0.2-1732382611165" daemon prio=5 tid=197 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "ProcessThread(sid:0 cport:57294):" daemon prio=5 tid=241 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) "master/215bb12e999f:0:becomeActiveMaster-MemStoreChunkPool Statistics" daemon prio=5 tid=360 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-1" daemon prio=5 tid=583 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 44097" daemon prio=5 tid=96 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605" daemon prio=5 tid=168 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-2" daemon prio=10 tid=397 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcClient-timer-pool-0" daemon prio=5 tid=411 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) at app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data6/current/BP-931987288-172.17.0.2-1732382611165" daemon prio=5 tid=226 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 37605" daemon prio=5 tid=67 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Reference Handler" daemon prio=10 tid=2 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) at java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) at java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) "RequestThrottler" daemon prio=5 tid=242 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) "Time-limited test-SendThread(127.0.0.1:57294)" daemon prio=5 tid=257 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at 
app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) at app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) "RPCClient-NioEventLoopGroup-6-8" daemon prio=5 tid=1209 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
2024-11-23T17:36:28,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data3/current/BP-931987288-172.17.0.2-1732382611165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:36:28,708 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4/current/BP-931987288-172.17.0.2-1732382611165 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-23T17:36:28,708 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-9" daemon prio=5 tid=1210 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "java.util.concurrent.ThreadPoolExecutor$Worker@607ffc2e[State = -1, empty queue]" daemon prio=5 tid=215 timed_waiting java.lang.Thread.State: TIMED_WAITING at 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 0 on default port 33341" daemon prio=5 tid=170 terminated java.lang.Thread.State: TERMINATED at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Timer for 'JobHistoryServer' metrics system" daemon prio=5 tid=10144 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@32dcf04c" daemon prio=5 tid=85 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-18-thread-1" prio=5 tid=86 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@43480010" daemon prio=5 tid=73 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp2080160363-41-acceptor-0@661eade8-ServerConnector@188d52a2{HTTP/1.1, (http/1.1)}{localhost:44391}" daemon prio=3 tid=41 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=57 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "NIOWorkerThread-3" daemon prio=5 tid=260 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp501822267-124" daemon prio=5 tid=124 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "BP-931987288-172.17.0.2-1732382611165 heartbeating to localhost/127.0.0.1:37605" daemon prio=5 tid=136 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "nioEventLoopGroup-4-1" prio=10 tid=126 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp2080160363-37" daemon prio=5 tid=37 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "pool-29-thread-1" prio=5 tid=137 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@3019d844" daemon prio=5 tid=61 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Time-limited test.named-queue-events-pool-0" daemon prio=5 tid=288 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) at app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) at app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) at app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-2" daemon prio=5 tid=543 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-16" 
daemon prio=5 tid=274 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "main" prio=5 tid=1 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/java.lang.Thread.dumpThreads(Native Method) at java.base@17.0.11/java.lang.Thread.getAllStackTraces(Thread.java:1671) at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDump(TimedOutTestsListener.java:92) at app//org.apache.hadoop.hbase.TimedOutTestsListener.buildThreadDiagnosticString(TimedOutTestsListener.java:78) at app//org.apache.hadoop.hbase.TimedOutTestsListener.testFailure(TimedOutTestsListener.java:65) at app//org.junit.runner.notification.SynchronizedRunListener.testFailure(SynchronizedRunListener.java:94) at app//org.junit.runner.notification.RunNotifier$6.notifyListener(RunNotifier.java:177) at app//org.junit.runner.notification.RunNotifier$SafeNotifier.run(RunNotifier.java:72) at app//org.junit.runner.notification.RunNotifier.fireTestFailures(RunNotifier.java:173) at app//org.junit.runner.notification.RunNotifier.fireTestFailure(RunNotifier.java:167) at app//org.apache.maven.surefire.common.junit4.Notifier.fireTestFailure(Notifier.java:100) at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:23) at app//org.junit.internal.runners.model.EachTestNotifier.addMultipleFailureException(EachTestNotifier.java:29) at app//org.junit.internal.runners.model.EachTestNotifier.addFailure(EachTestNotifier.java:21) at app//org.junit.runners.ParentRunner.run(ParentRunner.java:419) at app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) at app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) at app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) at app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) at app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) at app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) at app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) "IPC Server handler 0 on default port 37605" daemon prio=5 tid=64 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "RPCClient-NioEventLoopGroup-6-7" daemon prio=5 tid=1208 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-2" daemon prio=5 tid=585 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Session-HouseKeeper-347aa587-1" prio=5 tid=125 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RPCClient-NioEventLoopGroup-6-5" daemon prio=5 tid=1143 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server Responder" daemon prio=5 tid=131 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "nioEventLoopGroup-6-1" prio=10 tid=158 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp2080160363-40" daemon prio=5 tid=40 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=98 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39621" daemon prio=5 tid=280 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "qtp2080160363-44" daemon prio=5 tid=44 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) at 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 2 on default port 34763" daemon prio=5 tid=140 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "NIOWorkerThread-11" daemon prio=5 tid=269 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "master:store-WAL-Roller" daemon prio=5 tid=380 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) "IPC Server handler 1 on default port 37605" daemon prio=5 tid=65 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Command processor" daemon prio=5 tid=101 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) at app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) "Container metrics unregistration" daemon prio=5 tid=1626 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.Object.wait(Object.java:338) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "pool-7-thread-1" prio=5 tid=46 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "DatanodeAdminMonitor-0" daemon prio=5 tid=62 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@dd757eb" daemon prio=5 tid=74 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 1 on default port 44097" daemon prio=5 tid=105 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "MutableQuantiles-0" daemon prio=5 tid=987 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Monitor thread for TaskMonitor" daemon prio=5 tid=356 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 34763" daemon prio=5 tid=141 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) "Signal Dispatcher" daemon prio=9 tid=4 runnable java.lang.Thread.State: RUNNABLE "RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39621" daemon prio=5 tid=283 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "NIOServerCxnFactory.SelectorThread-1" daemon prio=5 tid=237 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) "IPC Server Responder" daemon prio=5 tid=97 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) "org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@36b49887" daemon prio=5 tid=49 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "Socket Reader #1 for port 0" daemon prio=5 tid=129 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data1)" daemon prio=5 tid=185 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "Hadoop-Metrics-Updater-0" daemon prio=5 tid=58 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "weak-ref-cleaner-strictcontextstorage" daemon prio=1 tid=254 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) at 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39621" daemon prio=5 tid=278 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "RegionServerTracker-0" daemon prio=5 tid=458 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "SnapshotHandlerChoreCleaner" daemon prio=5 tid=423 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server idle connection scanner for port 34763" daemon prio=5 tid=130 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) at java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) "ConnnectionExpirer" daemon prio=5 tid=235 timed_waiting 
java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) "NIOWorkerThread-8" daemon prio=5 tid=266 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-9" daemon prio=5 tid=267 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-10" daemon prio=5 tid=268 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "qtp2080160363-39" daemon prio=5 tid=39 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "MiniHBaseClusterRegionServer-EventLoopGroup-5-3" daemon prio=10 tid=398 runnable java.lang.Thread.State: RUNNABLE at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/cluster_ac6953f3-127b-f65c-4dab-63c70846597b/data/data4)" daemon prio=5 tid=188 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Object.wait(Native Method) at app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) "MarkedDeleteBlockScrubberThread" daemon prio=5 tid=48 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
"org.apache.hadoop.util.JvmPauseMonitor$Monitor@283d96cd" daemon prio=5 tid=93 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/java.lang.Thread.sleep(Native Method) at app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "zk-permission-watcher-pool-0" daemon prio=5 tid=1097 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "region-location-4" daemon prio=5 tid=1758 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "IPC Server handler 3 on default port 44097" daemon prio=5 tid=107 timed_waiting java.lang.Thread.State: TIMED_WAITING at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) at app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) at app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) 
"RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39621" daemon prio=5 tid=276 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) at java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) at app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) at app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) "Notification Thread" daemon prio=9 tid=13 runnable java.lang.Thread.State: RUNNABLE "regionserver/215bb12e999f:0.procedureResultReporter" daemon prio=5 tid=482 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) "qtp503631082-87" daemon prio=5 tid=87 runnable java.lang.Thread.State: RUNNABLE at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) at app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) at app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f3f0442d2a8.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) at java.base@17.0.11/java.lang.Thread.run(Thread.java:840) "NIOWorkerThread-2" daemon prio=5 tid=259 in Object.wait() java.lang.Thread.State: WAITING (on object monitor) at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"RPCClient-NioEventLoopGroup-6-15" daemon prio=5 tid=5987 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"zk-event-processor-pool-0" daemon prio=5 tid=262 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@325017cd" daemon prio=5 tid=119 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    at java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    at java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    at app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    at app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"qtp503631082-90" daemon prio=5 tid=90 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-12-thread-1" prio=5 tid=69 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server listener on 0" daemon prio=5 tid=128 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)

"HBase-Metrics2-1" daemon prio=5 tid=255 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"IPC Server Responder" daemon prio=5 tid=163 terminated
java.lang.Thread.State: TERMINATED
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    at app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    at app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)

"RPCClient-NioEventLoopGroup-6-14" daemon prio=5 tid=3757 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    at app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Common-Cleaner" daemon prio=8 tid=12 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Object.wait(Native Method)
    at java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    at java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
    at java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162)

"NIOWorkerThread-6" daemon prio=5 tid=264 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"nioEventLoopGroup-7-1" prio=10 tid=10150 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    at app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    at app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    at app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    at app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"FsDatasetAsyncDiskServiceFixer" daemon prio=5 tid=234 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    at app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576)

"regionserver/215bb12e999f:0.procedureResultReporter" daemon prio=5 tid=481 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)

"Session-HouseKeeper-543f2336-1" prio=5 tid=45 timed_waiting
java.lang.Thread.State: TIMED_WAITING
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    at java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"pool-1-thread-2" daemon prio=5 tid=15 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200)
    at java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281)
    at java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"Socket Reader #1 for port 0" daemon prio=5 tid=55 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    at app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)

"IPC Server listener on 0" daemon prio=5 tid=54 runnable
java.lang.Thread.State: RUNNABLE
    at java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    at java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    at java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    at app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)

"NIOWorkerThread-14" daemon prio=5 tid=272 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    at java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"FSEditLogAsync" daemon prio=5 tid=53 in Object.wait()
java.lang.Thread.State: WAITING (on object monitor)
    at java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    at java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    at java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    at java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    at java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    at app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

"MiniHBaseClusterRegionServer-EventLoopGroup-5-1" daemon prio=10 tid=333 runnable
java.lang.Thread.State: RUNNABLE
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    at app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    at app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    at app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    at java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

2024-11-23T17:36:28,713 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ead95b8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-23T17:36:28,714 INFO  [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6106ba1a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-23T17:36:28,714 INFO  [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-23T17:36:28,714 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@413b124e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-23T17:36:28,714 INFO  [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3622d218{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/80e469e8-4787-18e7-cac5-686068f0a9fe/hadoop.log.dir/,STOPPED}